diff --git a/.circleci/README.md b/.circleci/README.md index 78dfeebaa200f..c717b017b6690 100644 --- a/.circleci/README.md +++ b/.circleci/README.md @@ -11,12 +11,12 @@ This image is now built alongside other images in [agent-buildimages](https://gi Once you have created a new image by building a new version of agent-buildimages, you can test your modification with the associated invoke task: ```bash -invoke -e pipeline.update-buildimages --image-tag v12345678-c0mm1t5 +invoke -e buildimages.update --image-tag v12345678-c0mm1t5 ``` This will update the configuration of circleci and gitlab to use the __test version__ of these images. Once your test is successful, you can either move the `_test_version` from files or invoke ```bash -invoke -e pipeline.update-buildimages --image-tag v12345678-c0mm1t5 --no-test-version +invoke -e buildimages.update --image-tag v12345678-c0mm1t5 --no-test-version ``` If everything is green, get a review and merge the PR. diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index e23769c569ec7..455a51e447c13 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -42,7 +42,7 @@ /repository.datadog.yml @DataDog/agent-devx-infra /generate_tools.go @DataDog/agent-devx-infra /service.datadog.yaml @DataDog/agent-delivery -/static-analysis.datadog.yaml @DataDog/software-integrity-and-trust @DataDog/agent-devx-infra +/static-analysis.datadog.yml @DataDog/software-integrity-and-trust @DataDog/agent-devx-infra /.circleci/ @DataDog/agent-devx-infra @@ -52,8 +52,6 @@ /.github/workflows/serverless-benchmarks.yml @DataDog/serverless @Datadog/serverless-aws /.github/workflows/serverless-binary-size.yml @DataDog/serverless @Datadog/serverless-aws /.github/workflows/serverless-integration.yml @DataDog/serverless @Datadog/serverless-aws -/.github/workflows/serverless-vuln-scan.yml @DataDog/serverless @Datadog/serverless-aws -/.github/workflows/windows-*.yml @DataDog/windows-agent /.github/workflows/cws-btfhub-sync.yml @DataDog/agent-security /.github/workflows/gohai.yml @DataDog/agent-shared-components /.github/workflows/go-update-commenter.yml @DataDog/agent-shared-components @@ -75,7 +73,6 @@ /.gitlab/e2e/* @DataDog/agent-devx-infra @DataDog/agent-devx-loops /.gitlab/e2e_install_packages/* @DataDog/agent-delivery /.gitlab/e2e_pre_test/* @DataDog/agent-devx-infra @DataDog/agent-devx-loops -/.gitlab/junit_upload/* @DataDog/agent-devx-infra /.gitlab/kernel_matrix_testing/* @DataDog/agent-devx-infra @DataDog/ebpf-platform /.gitlab/lint/* @DataDog/agent-devx-infra /.gitlab/maintenance_jobs/* @DataDog/agent-devx-infra @DataDog/agent-devx-loops @@ -129,7 +126,6 @@ /.gitlab/deploy_packages/e2e.yml @DataDog/agent-devx-loops @DataDog/agent-e2e-testing @DataDog/fleet /.gitlab/deps_build/ @DataDog/ebpf-platform @DataDog/agent-delivery @DataDog/windows-agent -/.gitlab/windows_python_build.yml @DataDog/windows-agent /.gitlab/kitchen_* @DataDog/container-ecosystems @DataDog/agent-delivery /.gitlab/kitchen_testing/windows.yml @DataDog/container-ecosystems @DataDog/agent-delivery @DataDog/windows-agent @@ -139,19 +135,17 @@ /.gitlab/common/test_infra_version.yml @DataDog/agent-devx-loops @DataDog/agent-devx-infra /.gitlab/e2e/e2e.yml @DataDog/container-integrations @DataDog/agent-devx-loops -/.gitlab/e2e/install_packages @DataDog/container-ecosystems @DataDog/agent-delivery +/.gitlab/e2e_install_packages @DataDog/container-ecosystems @DataDog/agent-delivery /.gitlab/container_build/fakeintake.yml @DataDog/agent-e2e-testing @DataDog/agent-devx-loops /.gitlab/binary_build/fakeintake.yml 
@DataDog/agent-e2e-testing @DataDog/agent-devx-loops /.gitlab/functional_test/security_agent.yml @DataDog/agent-security @DataDog/agent-devx-infra /.gitlab/functional_test/serverless.yml @DataDog/serverless @Datadog/serverless-aws @DataDog/agent-devx-infra -/.gitlab/functional_test_cleanup.yml @DataDog/agent-security @DataDog/windows-kernel-integrations @DataDog/agent-devx-infra -/.gitlab/functional_test/system_probe_windows.yml @DataDog/agent-devx-infra @DataDog/windows-kernel-integrations +/.gitlab/functional_test_cleanup @DataDog/agent-security @DataDog/windows-kernel-integrations @DataDog/agent-devx-infra /.gitlab/functional_test/common.yml @DataDog/agent-devx-infra @DataDog/windows-kernel-integrations /.gitlab/functional_test/oracle.yml @DataDog/agent-devx-infra @DataDog/database-monitoring -/.gitlab/powershell_script_deploy/powershell_script_deploy.yml @DataDog/agent-delivery @DataDog/windows-agent -/.gitlab/powershell_script_deploy/powershell_script_signing.yml @DataDog/agent-delivery @DataDog/windows-agent +/.gitlab/powershell_script_deploy @DataDog/agent-delivery @DataDog/windows-agent /.gitlab/choco_build/choco_build.yml @DataDog/agent-delivery @DataDog/windows-agent /.gitlab/choco_deploy/choco_deploy.yml @DataDog/agent-delivery @DataDog/windows-agent @@ -164,8 +158,7 @@ /.gitlab/common/container_publish_job_templates.yml @DataDog/container-integrations @DataDog/agent-delivery /.gitlab/container_build/ @DataDog/container-integrations @DataDog/agent-delivery /.gitlab/container_build/include.yml @DataDog/container-integrations @DataDog/agent-delivery -/.gitlab/container_build/docker_windows_agent6.yml @DataDog/agent-delivery @DataDog/windows-agent -/.gitlab/container_build/docker_windows_agent7.yml @DataDog/agent-delivery @DataDog/windows-agent +/.gitlab/container_build/docker_windows* @DataDog/agent-delivery @DataDog/windows-agent /.gitlab/dev_container_deploy/ @DataDog/container-integrations @DataDog/agent-delivery /.gitlab/dev_container_deploy/fakeintake.yml @DataDog/agent-devx-loops @@ -174,7 +167,6 @@ /.gitlab/container_scan/container_scan.yml @DataDog/container-integrations @DataDog/agent-delivery -/.gitlab/internal_image_deploy.yml @DataDog/container-integrations @DataDog/agent-delivery /.gitlab/maintenance_jobs/docker.yml @DataDog/container-integrations @DataDog/agent-delivery @@ -190,9 +182,7 @@ /.gitlab/functional_test/regression_detector.yml @DataDog/single-machine-performance -/.gitlab/software_composition_analysis/ @DataDog/software-integrity-and-trust -/.gitlab/source_test_stats/windows.yml @DataDog/agent-platform @DataDog/windows-agent /chocolatey/ @DataDog/windows-agent @@ -223,7 +213,6 @@ /cmd/agent/dist/conf.d/win32_event_log.d/ @DataDog/windows-agent /cmd/agent/install*.sh @DataDog/container-ecosystems @DataDog/agent-delivery /cmd/cluster-agent/ @DataDog/container-platform -/cmd/cluster-agent/commands/ @DataDog/container-platform /cmd/cluster-agent-cloudfoundry/ @DataDog/platform-integrations /cmd/cluster-agent/api/v1/cloudfoundry_metadata.go @DataDog/platform-integrations /cmd/cws-instrumentation/ @DataDog/agent-security @@ -244,16 +233,12 @@ /cmd/system-probe/modules/tcp_queue_tracer* @DataDog/container-integrations /cmd/system-probe/modules/traceroute* @DataDog/network-device-monitoring @Datadog/Networks /cmd/system-probe/modules/ping* @DataDog/network-device-monitoring -/cmd/system-probe/modules/service_discover* @DataDog/universal-service-monitoring /cmd/system-probe/modules/language_detection* @DataDog/processes @DataDog/universal-service-monitoring 
-/cmd/system-probe/runtime/ @DataDog/agent-security /cmd/system-probe/modules/dynamic_instrumentation* @DataDog/debugger -/cmd/system-probe/windows/ @DataDog/windows-kernel-integrations /cmd/system-probe/windows_resources/ @DataDog/windows-kernel-integrations /cmd/system-probe/main_windows*.go @DataDog/windows-kernel-integrations /cmd/systray/ @DataDog/windows-agent /cmd/security-agent/ @DataDog/agent-security -/cmd/updater/ @DataDog/fleet @DataDog/windows-agent /cmd/installer/ @DataDog/fleet @DataDog/windows-agent /dev/ @DataDog/agent-devx-loops @@ -263,7 +248,7 @@ /Dockerfiles/agent/entrypoint.d.windows/ @DataDog/container-integrations @DataDog/windows-agent /Dockerfiles/agent/entrypoint.ps1 @DataDog/container-integrations @DataDog/windows-agent /Dockerfiles/agent/windows/ @DataDog/container-integrations @DataDog/windows-agent -/Dockerfiles/agent-ot/Dockerfile.byoc @DataDog/opentelemetry +/Dockerfiles/agent-ot @DataDog/opentelemetry /docs/ @DataDog/documentation @DataDog/agent-devx-loops /docs/dev/checks/ @DataDog/documentation @DataDog/agent-metrics-logs @@ -346,10 +331,8 @@ /pkg/commonchecks/ @DataDog/agent-metrics-logs /pkg/cli/ @DataDog/agent-shared-components /pkg/cli/subcommands/clusterchecks @DataDog/container-platform -/pkg/dogstatsd/ @DataDog/agent-metrics-logs /pkg/discovery/ @DataDog/universal-service-monitoring /pkg/errors/ @DataDog/agent-shared-components -/pkg/forwarder/ @DataDog/agent-processing-and-routing /pkg/gohai @DataDog/agent-shared-components /pkg/gpu/ @DataDog/ebpf-platform /pkg/jmxfetch/ @DataDog/agent-metrics-logs @@ -360,8 +343,6 @@ /pkg/serverless/ @DataDog/serverless @Datadog/serverless-aws /pkg/serverless/appsec/ @DataDog/asm-go /pkg/status/ @DataDog/agent-shared-components -/pkg/status/templates/trace-agent.tmpl @DataDog/agent-apm -/pkg/status/templates/process-agent.tmpl @DataDog/processes /pkg/telemetry/ @DataDog/agent-shared-components /pkg/telemetry/stat_gauge_wrapper.go @DataDog/ebpf-platform /pkg/telemetry/stat_counter_wrapper.go @DataDog/ebpf-platform @@ -404,7 +385,6 @@ /pkg/collector/corechecks/gpu/ @DataDog/ebpf-platform /pkg/collector/corechecks/network-devices/ @DataDog/network-device-monitoring /pkg/collector/corechecks/orchestrator/ @DataDog/container-app -/pkg/collector/corechecks/kubernetes/ @DataDog/container-integrations /pkg/collector/corechecks/net/ @DataDog/platform-integrations /pkg/collector/corechecks/oracle @DataDog/database-monitoring /pkg/collector/corechecks/sbom/ @DataDog/container-integrations @@ -417,14 +397,11 @@ /pkg/collector/corechecks/system/winproc/ @DataDog/windows-agent /pkg/collector/corechecks/systemd/ @DataDog/agent-integrations /pkg/collector/corechecks/nvidia/ @DataDog/platform-integrations -/pkg/collector/corechecks/windows_event_log/ @DataDog/windows-agent /pkg/config/ @DataDog/agent-shared-components /pkg/config/config_template.yaml @DataDog/agent-shared-components @DataDog/documentation /pkg/config/setup/apm.go @DataDog/agent-apm /pkg/config/autodiscovery/ @DataDog/container-integrations @DataDog/container-platform /pkg/config/env @DataDog/container-integrations @DataDog/container-platform -/pkg/config/logs @Datadog/agent-shared-components -/pkg/config/logs/internal/seelog/seelog_config.go @Datadog/agent-shared-components /pkg/config/setup @DataDog/agent-shared-components /pkg/config/setup/process*.go @DataDog/processes /pkg/config/setup/system_probe.go @DataDog/ebpf-platform @@ -440,15 +417,11 @@ /pkg/diagnose/ports/ @DataDog/agent-shared-components /pkg/eventmonitor/ @DataDog/ebpf-platform 
@DataDog/agent-security /pkg/dynamicinstrumentation/ @DataDog/debugger -/pkg/epforwarder/ @DataDog/agent-processing-and-routing /pkg/flare/ @DataDog/agent-shared-components /pkg/flare/*_win.go @Datadog/windows-agent /pkg/flare/*_windows.go @Datadog/windows-agent /pkg/flare/*_windows_test.go @Datadog/windows-agent /pkg/fleet/ @DataDog/fleet @DataDog/windows-agent -/pkg/otlp/ @DataDog/opentelemetry -/pkg/otlp/*_serverless*.go @DataDog/serverless @Datadog/serverless-aws -/pkg/otlp/*_not_serverless*.go @DataDog/opentelemetry /pkg/pidfile/ @DataDog/agent-shared-components /pkg/persistentcache/ @DataDog/agent-metrics-logs /pkg/proto/ @DataDog/agent-shared-components @@ -465,13 +438,11 @@ /pkg/util/clusteragent/ @DataDog/container-platform /pkg/util/containerd/ @DataDog/container-integrations /pkg/util/containers/ @DataDog/container-integrations -/pkg/util/containers/collectors/cloudfoundry.go @DataDog/platform-integrations /pkg/util/docker/ @DataDog/container-integrations /pkg/util/ecs/ @DataDog/container-integrations /pkg/util/funcs/ @DataDog/ebpf-platform /pkg/util/kernel/ @DataDog/ebpf-platform /pkg/util/kubernetes/ @DataDog/container-integrations @DataDog/container-platform @DataDog/container-app -/pkg/util/orchestrator/ @DataDog/container-app /pkg/util/podman/ @DataDog/container-integrations /pkg/util/prometheus @DataDog/container-integrations /pkg/util/tagger @DataDog/container-platform @@ -501,7 +472,6 @@ /pkg/logs/sender @DataDog/agent-processing-and-routing /pkg/process/ @DataDog/processes /pkg/process/util/address*.go @DataDog/Networks -/pkg/process/util/netns*.go @DataDog/Networks /pkg/process/checks/net*.go @DataDog/Networks /pkg/process/metadata/parser/ @DataDog/universal-service-monitoring @DataDog/processes @DataDog/Networks /pkg/process/metadata/parser/*windows* @DataDog/universal-service-monitoring @DataDog/processes @DataDog/Networks @DataDog/windows-kernel-integrations @@ -533,33 +503,19 @@ /pkg/network/ebpf/c/prebuilt/shared-libraries* @DataDog/universal-service-monitoring /pkg/network/ebpf/c/runtime/shared-libraries* @DataDog/universal-service-monitoring /pkg/network/ebpf/c/shared-libraries/ @DataDog/universal-service-monitoring -/pkg/network/ebpf/c/prebuilt/http* @DataDog/universal-service-monitoring -/pkg/network/ebpf/c/runtime/http* @DataDog/universal-service-monitoring /pkg/network/ebpf/c/protocols/ @DataDog/universal-service-monitoring /pkg/network/encoding/marshal/*usm* @DataDog/universal-service-monitoring /pkg/network/encoding/marshal/*_windows*.go @DataDog/windows-kernel-integrations -/pkg/network/etw/ @DataDog/windows-kernel-integrations /pkg/network/go/ @DataDog/universal-service-monitoring /pkg/network/protocols/ @DataDog/universal-service-monitoring /pkg/network/protocols/http/driver_*.go @DataDog/windows-kernel-integrations /pkg/network/protocols/http/etw*.go @DataDog/windows-kernel-integrations /pkg/network/protocols/http/*_windows*.go @DataDog/windows-kernel-integrations -/pkg/network/tracer/*classification*.go @DataDog/universal-service-monitoring -/pkg/network/tracer/testutil/http2/ @DataDog/universal-service-monitoring -/pkg/network/tracer/testutil/grpc/ @DataDog/universal-service-monitoring -/pkg/network/tracer/testutil/prefetch_file/ @DataDog/universal-service-monitoring /pkg/network/tracer/testutil/proxy/ @DataDog/universal-service-monitoring -/pkg/network/tracer/*usm*.go @DataDog/universal-service-monitoring /pkg/network/tracer/*_windows*.go @DataDog/windows-kernel-integrations /pkg/network/usm/ @DataDog/universal-service-monitoring 
/pkg/network/usm/tests/*_windows*.go @DataDog/windows-kernel-integrations /pkg/ebpf/ @DataDog/ebpf-platform -/pkg/ebpf/bytecode/runtime/conntrack.go @DataDog/Networks @DataDog/universal-service-monitoring -/pkg/ebpf/bytecode/runtime/usm.go @DataDog/Networks @DataDog/universal-service-monitoring -/pkg/ebpf/bytecode/runtime/oom-kill.go @DataDog/container-integrations -/pkg/ebpf/bytecode/runtime/runtime-security.go @DataDog/agent-security -/pkg/ebpf/bytecode/runtime/tcp-queue-length.go @DataDog/container-integrations -/pkg/ebpf/bytecode/runtime/tracer.go @DataDog/Networks @DataDog/universal-service-monitoring /pkg/ebpf/map_cleaner*.go @DataDog/universal-service-monitoring /pkg/compliance/ @DataDog/agent-cspm /pkg/databasemonitoring @DataDog/database-monitoring @@ -576,7 +532,6 @@ /pkg/collector/corechecks/networkpath/ @DataDog/network-device-monitoring @DataDog/Networks /releasenotes/ @DataDog/documentation -/releasenotes-installscript/ @DataDog/documentation /releasenotes-dca/ @DataDog/documentation /rtloader/ @DataDog/agent-metrics-logs @@ -605,7 +560,6 @@ /tasks/windows_resources.py @DataDog/windows-agent /tasks/components.py @DataDog/agent-shared-components /tasks/components_templates @DataDog/agent-shared-components -/tasks/updater.py @DataDog/fleet /tasks/libs/common/omnibus.py @DataDog/agent-delivery /tasks/omnibus.py @DataDog/agent-delivery /tasks/unit_tests/components_tests.py @DataDog/agent-shared-components @@ -622,8 +576,6 @@ /test/kitchen/ @DataDog/agent-devx-loops /test/kitchen/test-definitions/ @DataDog/container-ecosystems @DataDog/agent-delivery /test/kitchen/test/integration/ @DataDog/container-ecosystems @DataDog/agent-delivery -/test/kitchen/kitchen-azure-security-agent-test.yml @DataDog/agent-security -/test/kitchen/kitchen-vagrant-security-agent.yml @DataDog/agent-security /test/kitchen/site-cookbooks/dd-security-agent-check/ @DataDog/agent-security /test/kitchen/test/integration/security-agent-stress/ @DataDog/agent-security /test/kitchen/test/integration/security-agent-test/ @DataDog/agent-security @@ -634,7 +586,6 @@ /test/kitchen/test/integration/win-installopts/ @DataDog/windows-agent /test/kitchen/test/integration/win-no-subservices/ @DataDog/windows-agent /test/kitchen/test/integration/win-sysprobe-test/ @DataDog/windows-kernel-integrations -/test/kitchen/test/integration/win-reinstall-option/ @DataDog/windows-agent /test/kitchen/test/integration/win-repair/ @DataDog/windows-agent /test/kitchen/test/integration/win-user/ @DataDog/windows-agent /test/fakeintake/ @DataDog/agent-e2e-testing @DataDog/agent-devx-loops @@ -665,14 +616,13 @@ /test/new-e2e/tests/windows @DataDog/windows-agent @DataDog/windows-kernel-integrations /test/new-e2e/tests/apm @DataDog/agent-apm /test/new-e2e/tests/remote-config @DataDog/remote-config -/test/new-e2e/tests/updater @DataDog/fleet @DataDog/windows-agent /test/new-e2e/tests/installer @DataDog/fleet @DataDog/windows-agent +/test/new-e2e/tests/gpu @Datadog/ebpf-platform /test/otel/ @DataDog/opentelemetry /test/system/ @DataDog/agent-shared-components /test/system/dogstatsd/ @DataDog/agent-metrics-logs /test/benchmarks/apm_scripts/ @DataDog/agent-apm /test/regression/ @DataDog/single-machine-performance -/test/workload-checks/ @DataDog/single-machine-performance /tools/ @DataDog/agent-devx-loops /tools/ci @DataDog/agent-devx-infra diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a14c629d013c2..c9cd34dcf8af0 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,7 +30,6 @@ include: - .gitlab/integration_test/include.yml - 
.gitlab/internal_image_deploy/internal_image_deploy.yml - .gitlab/internal_kubernetes_deploy/include.yml - - .gitlab/junit_upload/junit_upload.yml - .gitlab/kitchen_cleanup/include.yml - .gitlab/kitchen_deploy/kitchen_deploy.yml - .gitlab/kitchen_testing/include.yml @@ -257,17 +256,19 @@ variables: VCPKG_BLOB_SAS_URL: ci.datadog-agent-buildimages.vcpkg_blob_sas_url # windows-agent WINGET_PAT: ci.datadog-agent.winget_pat # windows-agent # End aws ssm variables - + # Start vault variables AGENT_API_KEY_ORG2: agent-api-key-org-2 # agent-devx-infra AGENT_APP_KEY_ORG2: agent-ci-app-key-org-2 # agent-devx-infra AGENT_GITHUB_APP: agent-github-app # agent-devx-infra + AGENT_QA_E2E: agent-qa-e2e # agent-devx-loops ATLASSIAN_WRITE: atlassian-write # agent-devx-infra DOCKER_REGISTRY_RO: dockerhub-readonly # agent-delivery GITLAB_TOKEN: gitlab-token # agent-devx-infra INSTALL_SCRIPT_API_KEY_ORG2: install-script-api-key-org-2 # agent-devx-infra MACOS_GITHUB_APP_1: macos-github-app-one # agent-devx-infra MACOS_GITHUB_APP_2: macos-github-app-two # agent-devx-infra + SLACK_AGENT: slack-agent-ci # agent-devx-infra # End vault variables DD_PKG_VERSION: "latest" @@ -1188,3 +1189,12 @@ workflow: compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 - when: manual allow_failure: true + +.on_gpu_or_e2e_changes: + - !reference [.on_e2e_main_release_or_rc] + - changes: + paths: + - pkg/gpu/**/* + - test/new-e2e/tests/gpu/**/* + - pkg/collector/corechecks/gpu/**/* + compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 diff --git a/.gitlab/.ci-linters.yml b/.gitlab/.ci-linters.yml index db38acacdd029..b888dff051b73 100644 --- a/.gitlab/.ci-linters.yml +++ b/.gitlab/.ci-linters.yml @@ -109,7 +109,6 @@ job-owners: - trace-agent-v05-2cpus-normal_load-fixed_sps - trace-agent-v05-2cpus-stress_load-fixed_sps - trigger-flakes-finder - - unit_tests_arm64_junit_upload - unit_tests_notify - update_rc_build_links - validate_modules diff --git a/.gitlab/JOBOWNERS b/.gitlab/JOBOWNERS index add750d07f654..1855b1aa11db9 100644 --- a/.gitlab/JOBOWNERS +++ b/.gitlab/JOBOWNERS @@ -147,30 +147,9 @@ k8s-e2e-otlp-* @DataDog/opentelemetry k8s-e2e-cspm-* @DataDog/agent-security # New E2E -new-e2e-containers* @DataDog/container-integrations -new-e2e-agent-subcommands* @DataDog/agent-shared-components -new-e2e-agent-shared-components* @DataDog/agent-shared-components -new-e2e-language-detection* @DataDog/processes -new-e2e-process* @DataDog/processes -new-e2e-agent-platform* @DataDog/container-ecosystems @DataDog/agent-delivery -new-e2e-platform-integrations* @DataDog/agent-delivery @DataDog/platform-integrations -new-e2e-aml* @DataDog/agent-metrics-logs -new-e2e-apm* @DataDog/agent-apm -new-e2e-discovery* @Datadog/universal-service-monitoring -new-e2e-ndm* @DataDog/network-device-monitoring -new-e2e-npm* @DataDog/Networks -new-e2e-cws* @DataDog/agent-security -new-e2e-orchestrator* @DataDog/container-app -new-e2e-otel* @DataDog/opentelemetry -e2e_pre_test* @DataDog/agent-devx-loops -new-e2e-remote-config* @DataDog/remote-config -new-e2e-installer* @DataDog/fleet -new-e2e-installer-windows @DataDog/windows-agent -new-e2e-windows* @DataDog/windows-agent -new-e2e-windows-systemprobe @DataDog/windows-kernel-integrations -new-e2e-windows-security-agent @DataDog/windows-kernel-integrations -new-e2e_windows_powershell_module_test @DataDog/windows-kernel-integrations -new-e2e-eks-cleanup-on-failure 
@DataDog/agent-devx-loops +e2e_pre_test* @DataDog/agent-devx-loops +new-e2e* @DataDog/multiple + # Kernel matrix testing upload_dependencies* @DataDog/ebpf-platform diff --git a/.gitlab/choco_deploy/choco_deploy.yml b/.gitlab/choco_deploy/choco_deploy.yml index 56e66603fe9af..6d7f8959a554d 100644 --- a/.gitlab/choco_deploy/choco_deploy.yml +++ b/.gitlab/choco_deploy/choco_deploy.yml @@ -12,7 +12,7 @@ publish_choco_7_x64: before_script: - $tmpfile = [System.IO.Path]::GetTempFileName() - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" -parameterName "$Env:CHOCOLATEY_API_KEY" -tempFile "$tmpfile") - - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } + - If ($lastExitCode -ne "0") { exit "$lastExitCode" } - $chocolateyApiKey=$(cat "$tmpfile") - Remove-Item "$tmpfile" script: diff --git a/.gitlab/common/test_infra_version.yml b/.gitlab/common/test_infra_version.yml index 5316c7e28cf7d..d63d16e892122 100644 --- a/.gitlab/common/test_infra_version.yml +++ b/.gitlab/common/test_infra_version.yml @@ -4,4 +4,4 @@ variables: # and check the job creating the image to make sure you have the right SHA prefix TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: "" # Make sure to update test-infra-definitions version in go.mod as well - TEST_INFRA_DEFINITIONS_BUILDIMAGES: f694c4dc33e4 + TEST_INFRA_DEFINITIONS_BUILDIMAGES: cd9a362371a8 diff --git a/.gitlab/container_build/docker_windows.yml b/.gitlab/container_build/docker_windows.yml index dada869f2a0d3..c9b67e24ef86b 100644 --- a/.gitlab/container_build/docker_windows.yml +++ b/.gitlab/container_build/docker_windows.yml @@ -34,8 +34,8 @@ -v "$(Get-Location):C:\mnt" -v \\.\pipe\docker_engine:\\.\pipe\docker_engine 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_${Env:VARIANT}_x64${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} powershell - -C C:\mnt\tools\ci\docker-login.ps1 - - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } + -File C:\mnt\tools\ci\docker-login.ps1 + - If ($lastExitCode -ne "0") { exit "$lastExitCode" } - powershell -Command "$(Get-Location)\tools\ci\retry.ps1 docker build --no-cache --build-arg GENERAL_ARTIFACTS_CACHE_BUCKET_URL=${GENERAL_ARTIFACTS_CACHE_BUCKET_URL} ${BUILD_ARG} --pull --file ${BUILD_CONTEXT}/windows/amd64/Dockerfile --tag ${TARGET_TAG} ${BUILD_CONTEXT}" - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } - powershell -Command "$(Get-Location)\tools\ci\retry.ps1 docker push ${TARGET_TAG}" diff --git a/.gitlab/deploy_packages/winget.yml b/.gitlab/deploy_packages/winget.yml index f28f946b1fb0c..54261a8d1a867 100644 --- a/.gitlab/deploy_packages/winget.yml +++ b/.gitlab/deploy_packages/winget.yml @@ -12,7 +12,7 @@ publish_winget_7_x64: before_script: - $tmpfile = [System.IO.Path]::GetTempFileName() - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" -parameterName "$Env:WINGET_PAT" -tempFile "$tmpfile") - - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } + - If ($lastExitCode -ne "0") { exit "$lastExitCode" } - $wingetPat=$(cat "$tmpfile") - Remove-Item "$tmpfile" script: diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml index daa7b5985ff4a..145e18aff5e75 100644 --- a/.gitlab/e2e/e2e.yml +++ b/.gitlab/e2e/e2e.yml @@ -11,11 +11,11 @@ - !reference [.retrieve_linux_go_e2e_deps] # Setup AWS Credentials - mkdir -p ~/.aws - - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config || exit $? 
+ - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E profile >> ~/.aws/config || exit $? - export AWS_PROFILE=agent-qa-ci # Now all `aws` commands target the agent-qa profile - - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_PUBLIC_KEY_RSA > $E2E_PUBLIC_KEY_PATH || exit $? - - touch $E2E_PRIVATE_KEY_PATH && chmod 600 $E2E_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_KEY_RSA > $E2E_PRIVATE_KEY_PATH || exit $? + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E ssh_public_key_rsa > $E2E_PUBLIC_KEY_PATH || exit $? + - touch $E2E_PRIVATE_KEY_PATH && chmod 600 $E2E_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E ssh_key_rsa > $E2E_PRIVATE_KEY_PATH || exit $? # Use S3 backend - pulumi login "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE" # Setup Azure credentials. https://www.pulumi.com/registry/packages/azure-native/installation-configuration/#set-configuration-using-pulumi-config @@ -120,6 +120,7 @@ new-e2e-containers-eks-init: TEAM: container-integrations EXTRA_PARAMS: --run TestEKSSuite E2E_INIT_ONLY: "true" + SHOULD_RUN_IN_FLAKES_FINDER: "false" allow_failure: true new-e2e-containers-eks: @@ -256,6 +257,7 @@ new-e2e-npm-eks-init: TEAM: network-performance-monitoring EXTRA_PARAMS: --run "TestEKSVMSuite" E2E_INIT_ONLY: "true" + SHOULD_RUN_IN_FLAKES_FINDER: "false" allow_failure: true new-e2e-npm-eks: @@ -302,6 +304,7 @@ new-e2e-cws: - qa_agent - qa_dca variables: + SHOULD_RUN_IN_FLAKES_FINDER: "false" # Currently broken in flake finder ADXT-687 TARGETS: ./tests/cws TEAM: csm-threats-agent CWS_INSTRUMENTATION_FULLIMAGEPATH: 669783387624.dkr.ecr.us-east-1.amazonaws.com/cws-instrumentation:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA} @@ -471,6 +474,7 @@ new-e2e-windows-systemprobe: variables: TARGETS: ./tests/sysprobe-functional TEAM: windows-kernel-integrations + SHOULD_RUN_IN_FLAKES_FINDER: "false" # Currently broken in flake finder ADXT-687 new-e2e-windows-security-agent: extends: .new_e2e_template @@ -493,14 +497,13 @@ new-e2e-otel-eks-init: - !reference [.manual] needs: - !reference [.needs_new_e2e_template] - - qa_dca - - qa_agent - - qa_agent_ot variables: TARGETS: ./tests/otel TEAM: otel EXTRA_PARAMS: --run TestOTelAgentIAEKS E2E_INIT_ONLY: "true" + SHOULD_RUN_IN_FLAKES_FINDER: "false" + allow_failure: true new-e2e-otel-eks: @@ -569,6 +572,19 @@ new-e2e-cspm: TEAM: cspm timeout: 35m +new-e2e-gpu: + extends: .new_e2e_template + rules: + - !reference [.on_gpu_or_e2e_changes] + - !reference [.manual] + variables: + TARGETS: ./tests/gpu # the target path where tests are + TEAM: ebpf-platform + needs: # list of required jobs. By default gitlab waits for any previous jobs. + - !reference [.needs_new_e2e_template] + - deploy_deb_testing-a7_x64 # agent 7 debian package + + generate-flakes-finder-pipeline: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES stage: e2e @@ -623,11 +639,11 @@ new-e2e-eks-cleanup-on-failure: script: # Setup AWS Credentials - mkdir -p ~/.aws - - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config || exit $? + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E profile >> ~/.aws/config || exit $? - export AWS_PROFILE=agent-qa-ci # Now all `aws` commands target the agent-qa profile - - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_PUBLIC_KEY_RSA > $E2E_PUBLIC_KEY_PATH || exit $? 
- - touch $E2E_PRIVATE_KEY_PATH && chmod 600 $E2E_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_KEY_RSA > $E2E_PRIVATE_KEY_PATH || exit $? + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E ssh_public_key_rsa > $E2E_PUBLIC_KEY_PATH || exit $? + - touch $E2E_PRIVATE_KEY_PATH && chmod 600 $E2E_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E ssh_key_rsa > $E2E_PRIVATE_KEY_PATH || exit $? # Use S3 backend - PULUMI_CONFIG_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_PULUMI_CONFIG_PASSPHRASE) || exit $?; export PULUMI_CONFIG_PASSPHRASE - pulumi login "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE" diff --git a/.gitlab/integration_test/windows.yml b/.gitlab/integration_test/windows.yml index 2ddf04fddb997..39e24c348f56e 100644 --- a/.gitlab/integration_test/windows.yml +++ b/.gitlab/integration_test/windows.yml @@ -9,7 +9,7 @@ before_script: - $tmpfile = [System.IO.Path]::GetTempFileName() - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" -parameterName "$Env:VCPKG_BLOB_SAS_URL" -tempFile "$tmpfile") - - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } + - If ($lastExitCode -ne "0") { exit "$lastExitCode" } - $vcpkgBlobSaSUrl=$(cat "$tmpfile") - Remove-Item "$tmpfile" script: diff --git a/.gitlab/junit_upload/junit_upload.yml b/.gitlab/junit_upload/junit_upload.yml deleted file mode 100644 index 85cedd519efc8..0000000000000 --- a/.gitlab/junit_upload/junit_upload.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -unit_tests_arm64_junit_upload: - stage: junit_upload - rules: - - !reference [.except_mergequeue] - - when: always - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES - tags: ["arch:amd64"] - allow_failure: true - needs: - - tests_deb-arm64-py3 - - tests_rpm-arm64-py3 - script: - - $CI_PROJECT_DIR/tools/ci/junit_upload.sh diff --git a/.gitlab/kernel_matrix_testing/common.yml b/.gitlab/kernel_matrix_testing/common.yml index 6178035315b68..9768897240102 100644 --- a/.gitlab/kernel_matrix_testing/common.yml +++ b/.gitlab/kernel_matrix_testing/common.yml @@ -29,7 +29,7 @@ .write_ssh_key_file: - touch $AWS_EC2_SSH_KEY_FILE && chmod 600 $AWS_EC2_SSH_KEY_FILE - - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_KEY > $AWS_EC2_SSH_KEY_FILE || exit $? + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E ssh_key > $AWS_EC2_SSH_KEY_FILE || exit $? # Without the newline ssh silently fails and moves on to try other auth methods - echo "" >> $AWS_EC2_SSH_KEY_FILE - chmod 600 $AWS_EC2_SSH_KEY_FILE @@ -47,7 +47,7 @@ .kmt_new_profile: - mkdir -p ~/.aws - - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config || exit $? + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E profile >> ~/.aws/config || exit $? 
- export AWS_PROFILE=agent-qa-ci .define_if_collect_complexity: diff --git a/.gitlab/kernel_matrix_testing/security_agent.yml b/.gitlab/kernel_matrix_testing/security_agent.yml index 64929e7b335d3..6846a15f4a0a4 100644 --- a/.gitlab/kernel_matrix_testing/security_agent.yml +++ b/.gitlab/kernel_matrix_testing/security_agent.yml @@ -32,7 +32,7 @@ kmt_setup_env_secagent_arm64: AMI_ID_ARG: "--arm-ami-id=$KERNEL_MATRIX_TESTING_ARM_AMI_ID" LibvirtSSHKey: $CI_PROJECT_DIR/libvirt_rsa-arm TEST_COMPONENT: security-agent - TEST_SETS: cws_host + TEST_SETS: cws_host,cws_docker kmt_setup_env_secagent_x64: extends: @@ -167,7 +167,25 @@ kmt_run_secagent_tests_x64_docker: parallel: matrix: - TAG: + - "ubuntu_18.04" + - "ubuntu_20.04" + - "ubuntu_22.04" + - "ubuntu_23.10" - "ubuntu_24.04" + - "amazon_4.14" + - "amazon_5.4" + - "amazon_5.10" + - "amazon_2023" + - "fedora_37" + - "fedora_38" + - "debian_10" + - "debian_11" + - "debian_12" + - "centos_7.9" + - "oracle_8.9" + - "oracle_9.3" + - "rocky_8.5" + - "rocky_9.3" TEST_SET: [cws_docker] after_script: - !reference [.collect_outcomes_kmt] @@ -207,6 +225,39 @@ kmt_run_secagent_tests_arm64: - !reference [.collect_outcomes_kmt] - !reference [.upload_junit_kmt] +kmt_run_secagent_tests_arm64_docker: + extends: + - .kmt_run_secagent_tests + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/system-probe_arm64$DATADOG_AGENT_SYSPROBE_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_SYSPROBE_BUILDIMAGES + tags: ["arch:arm64"] + needs: + - kmt_setup_env_secagent_arm64 + - upload_dependencies_secagent_arm64 + - upload_secagent_tests_arm64 + variables: + ARCH: "arm64" + parallel: + matrix: + - TAG: + - "ubuntu_22.04" + - "ubuntu_23.10" + - "ubuntu_24.04" + - "amazon_5.4" + - "amazon_5.10" + - "amazon_2023" + - "fedora_37" + - "fedora_38" + - "debian_11" + - "debian_12" + - "oracle_8.9" + - "oracle_9.3" + - "rocky_8.5" + - "rocky_9.3" + TEST_SET: ["cws_docker"] + after_script: + - !reference [.collect_outcomes_kmt] + - !reference [.upload_junit_kmt] + .kmt_secagent_cleanup: extends: - .kmt_cleanup diff --git a/.gitlab/notify/notify.yml b/.gitlab/notify/notify.yml index 3db4bf284d339..1feadeb33e838 100644 --- a/.gitlab/notify/notify.yml +++ b/.gitlab/notify/notify.yml @@ -114,7 +114,7 @@ notify_gitlab_ci_changes: timeout: 15 minutes # Added to prevent a stuck job blocking the resource_group defined above .failure_summary_setup: - - SLACK_API_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SLACK_AGENT_CI_TOKEN) || exit $?; export SLACK_API_TOKEN + - SLACK_API_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SLACK_AGENT token) || exit $?; export SLACK_API_TOKEN - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_TOKEN read_api) || exit $?; export GITLAB_TOKEN - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_API_KEY_ORG2 token) || exit $?; export DD_API_KEY - python3 -m pip install -r requirements.txt -r tasks/libs/requirements-notifications.txt diff --git a/.gitlab/source_test/linux.yml b/.gitlab/source_test/linux.yml index 31bcf7767b5b1..50e121aea3f7b 100644 --- a/.gitlab/source_test/linux.yml +++ b/.gitlab/source_test/linux.yml @@ -124,6 +124,7 @@ tests_deb-arm64-py3: - .rtloader_tests - .linux_tests after_script: + - !reference [.upload_junit_source] - !reference [.upload_coverage] image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:arm64"] @@ -136,6 +137,7 @@ tests_rpm-arm64-py3: - .rtloader_tests - .linux_tests 
after_script:
+    - !reference [.upload_junit_source]
+    - !reference [.upload_coverage]
  image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
  tags: ["arch:arm64"]
@@ -172,7 +174,7 @@ new-e2e-unit-tests:
    - !reference [.retrieve_linux_go_e2e_deps]
    # Setup AWS Credentials
    - mkdir -p ~/.aws
-    - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config || exit $?
+    - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E profile >> ~/.aws/config || exit $?
    - export AWS_PROFILE=agent-qa-ci
    # Use S3 backend
    - pulumi login "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE"
diff --git a/.gitlab/source_test/windows.yml b/.gitlab/source_test/windows.yml
index fa4b857241a24..96f3fa43806e6 100644
--- a/.gitlab/source_test/windows.yml
+++ b/.gitlab/source_test/windows.yml
@@ -46,7 +46,7 @@
      -e COVERAGE_CACHE_FLAG="${COVERAGE_CACHE_FLAG}"
      486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES}
      c:\mnt\tasks\winbuildscripts\unittests.bat
-    - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" }
+    - If ($lastExitCode -ne "0") { exit "$lastExitCode" }
  variables:
    TEST_OUTPUT_FILE: test_output.json
  artifacts:
diff --git a/CHANGELOG-DCA.rst b/CHANGELOG-DCA.rst
index 786b6903a4c0d..49bee184e5718 100644
--- a/CHANGELOG-DCA.rst
+++ b/CHANGELOG-DCA.rst
@@ -2,8 +2,78 @@ Release Notes
 =============
 
+.. _Release Notes_7.58.0:
+
+7.58.0
+======
+
+.. _Release Notes_7.58.0_Prelude:
+
+Prelude
+-------
+
+Released on: 2024-10-21
+Pinned to datadog-agent v7.58.0: `CHANGELOG `_.
+
+.. _Release Notes_7.58.0_New Features:
+
+New Features
+------------
+
+- Added capability to tag any Kubernetes resource based on labels and annotations.
+  This feature can be configured with `kubernetes_resources_annotations_as_tags` and `kubernetes_resources_labels_as_tags`.
+  These configurations associate group resources with an annotations-to-tags (or labels-to-tags) map.
+  For example, `deployments.apps` can be associated with an annotations-to-tags map to configure annotations as tags for deployments.
+  Example:
+  {`deployments.apps`: {`annotationKey1`: `tag1`, `annotationKey2`: `tag2`}}
+
+- The Kubernetes State Metrics (KSM) check can now be configured to collect
+  pods from the Kubelet in node agents instead of collecting them from the API
+  Server in the Cluster Agent or the Cluster check runners. This is useful in
+  clusters with a large number of pods where emitting pod metrics from a
+  single check instance can cause performance issues due to the large number
+  of metrics emitted.
+
+
+.. _Release Notes_7.58.0_Enhancement Notes:
+
+Enhancement Notes
+-----------------
+
+- Added a new option for the Cluster Agent
+  ("admission_controller.inject_config.type_socket_volumes") to specify that
+  injected volumes should be of type "Socket". This option is disabled by
+  default. When set to true, injected pods will not start until the Agent
+  creates the DogstatsD and trace-agent sockets. This ensures no traces or
+  DogstatsD metrics are lost, but it can cause the pod to wait if the Agent
+  has issues creating the sockets.
+
+
+.. _Release Notes_7.58.0_Bug Fixes:
+
+Bug Fixes
+---------
+
+- Fixed an issue that prevented the Kubernetes autoscaler from evicting pods
+  injected by the Admission Controller.
+
+
+.. _Release Notes_7.57.1:
+
+7.57.1
+======
+
+..
_Release Notes_7.57.1_Prelude:
+
+Prelude
+-------
+
+Released on: 2024-09-17
+Pinned to datadog-agent v7.57.1: `CHANGELOG `_.
+
 .. _Release Notes_7.57.0:
 
+
 7.57.0
 ======
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index abbc7762d316e..270ceeac16d08 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -2,6 +2,143 @@ Release Notes
 =============
 
+.. _Release Notes_7.58.0:
+
+7.58.0
+======
+
+.. _Release Notes_7.58.0_Prelude:
+
+Prelude
+-------
+
+Released on: 2024-10-21
+
+- Please refer to the `7.58.0 tag on integrations-core `_ for the list of changes on the Core Checks
+
+
+.. _Release Notes_7.58.0_Upgrade Notes:
+
+Upgrade Notes
+-------------
+
+- Changes behavior of the timeout for Network Path. Previously, the timeout
+  signified the total time to wait for a full traceroute to complete. Now,
+  the timeout signifies the time to wait for each hop in the traceroute.
+  Additionally, the default timeout has been changed to 1000ms.
+
+
+.. _Release Notes_7.58.0_New Features:
+
+New Features
+------------
+
+- Added capability to tag any Kubernetes resource based on labels and annotations.
+  This feature can be configured with `kubernetes_resources_annotations_as_tags` and `kubernetes_resources_labels_as_tags`.
+  These configurations associate group resources with an annotations-to-tags (or labels-to-tags) map.
+  For example, `pods` can be associated with an annotations-to-tags map to configure annotations as tags for pods.
+  Example:
+  {`pods`: {`annotationKey1`: `tag1`, `annotationKey2`: `tag2`}}
+
+- The Kubernetes State Metrics (KSM) check can now be configured to collect
+  pods from the Kubelet in node agents instead of collecting them from the API
+  Server in the Cluster Agent or the Cluster check runners. This is useful in
+  clusters with a large number of pods where emitting pod metrics from a
+  single check instance can cause performance issues due to the large number
+  of metrics emitted.
+
+- NPM - adds UDP "Packets Sent" and "Packets Received" to the network telemetry in Linux.
+
+- [oracle] Add the ``active_session_history`` configuration parameter to optionally ingest Oracle active session history samples instead of query sampling.
+
+- Added config option ``logs_config.tag_truncated_logs``. When
+  enabled, file logs will come with a tag ``truncated:true`` if
+  they were truncated by the Agent.
+
+
+.. _Release Notes_7.58.0_Enhancement Notes:
+
+Enhancement Notes
+-----------------
+
+- [DBM] Bump go-sqllexer to 0.0.14 to skip collecting CTE tables as SQL metadata.
+
+- Agents are now built with Go ``1.22.7``.
+
+- Add the ability to tag cisco-sdwan device and interface metrics with user-defined tags.
+
+- Add support for setting a custom log source from resource attribute or log attribute `datadog.log.source`.
+
+- The default UDP port for traceroute (port 33434) is now used for Network Traffic based paths, instead of the port detected by NPM.
+
+- [oracle] Add ``oracle_client_lib_dir`` config parameter.
+
+- [oracle] Increase tablespace check interval from 1 to 10 minutes.
+
+- [oracle] Don't try to fetch execution plans where ``plan_hash_value`` is ``0``.
+
+- The OTLP ingest endpoint now maps the new OTel semantic convention `deployment.environment.name` to `env`.
+
+- Prevents the use of the `process_config.run_in_core_agent.enabled` configuration option in unsupported environments.
+
+- APM: Trace payloads are now compressed with zstd by default.
+
+
+..
_Release Notes_7.58.0_Security Notes: + +Security Notes +-------------- + +- Bump embedded Python version to 3.12.6 to address `CVE-2024-4030` and `CVE-2024-4741`. + +- Update cURL to 8.9.1. + +- Update OpenSSL to 3.3.2 (on Linux & macOS) in order to mitigate CVE-2024-6119. + + +.. _Release Notes_7.58.0_Bug Fixes: + +Bug Fixes +--------- + +- Adds missing support for the logs config key to work with AD annotations V2. + +- Fix ``agent jmx [command]`` subcommands for container environments with annotations-based configs. + +- Fixed issue with openSUSE 15 RC 6 where the eBPF tracer wouldn't start due to a failed validation of the ``tcp_sendpage`` probe. + +- Fixed a rare issue where short-lived containers could cause + logs to be sent with the wrong container ID. + +- Fix Windows Process Agent argument stripping to account for spaces in the executable path. + +- Fixes issue with the kubelet corecheck where `kubernetes.kubelet.volume.*` metrics + were not properly being reported if any matching namespace exclusion filter was present. + +- OOM Kill Check now reports the cgroup name of the victim process rather than the triggering process. + +- The process agent will no longer exit prematurely when language detection is enabled or + when there is a misconfiguration stemming from `process_config.run_in_core_agent.enabled`'s + default enablement in Kubernetes. + +- Change the ``datadog-security-agent`` Windows service display name from ``Datadog Security Service`` to + ``Datadog Security Agent`` for consistency with other Agent services. + +- Fix a bug preventing SNMP V3 reconnection. + + +.. _Release Notes_7.58.0_Other Notes: + +Other Notes +----------- + +- Add metric origins for the Kubeflow integration. + +- Add functional tests to Oracle using a Docker service to host the database instance. + +- Adds Agent telemetry for Oracle collector. + + .. _Release Notes_7.57.2: 7.57.2 diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index adb14f7d9d7a1..352b4bfc13623 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -245,6 +245,7 @@ core,github.com/Microsoft/hcsshim/osversion,MIT,Copyright (c) 2015 Microsoft | C core,github.com/Microsoft/hcsshim/pkg/ociwclayer,MIT,Copyright (c) 2015 Microsoft | Copyright (c) 2018 Microsoft Corp. All rights reserved core,github.com/NVIDIA/go-nvml/pkg/dl,Apache-2.0,Copyright 2023 NVIDIA CORPORATION core,github.com/NVIDIA/go-nvml/pkg/nvml,Apache-2.0,Copyright 2023 NVIDIA CORPORATION +core,github.com/NVIDIA/go-nvml/pkg/nvml/mock,Apache-2.0,Copyright 2023 NVIDIA CORPORATION core,github.com/NYTimes/gziphandler,Apache-2.0,Copyright 2016-2017 The New York Times Company core,github.com/OneOfOne/xxhash,Apache-2.0,Copyright (c) 2014 Ahmed W. (OneOfOne) core,github.com/ProtonMail/go-crypto/bitcurves,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved @@ -746,6 +747,7 @@ core,github.com/containerd/containerd/contrib/seccomp/kernelversion,Apache-2.0," core,github.com/containerd/containerd/defaults,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/diff,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/diff/proxy,Apache-2.0,"Copyright 2012-2015 Docker, Inc." +core,github.com/containerd/containerd/errdefs,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/events,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/events/exchange,Apache-2.0,"Copyright 2012-2015 Docker, Inc." 
core,github.com/containerd/containerd/filters,Apache-2.0,"Copyright 2012-2015 Docker, Inc." @@ -2840,19 +2842,27 @@ core,gopkg.in/DataDog/dd-trace-go.v1/internal,Apache-2.0,"Copyright 2016-Present core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec,Apache-2.0,"Copyright 2016-Present Datadog, Inc." core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config,Apache-2.0,"Copyright 2016-Present Datadog, Inc." core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo,Apache-2.0,"Copyright 2016-Present Datadog, Inc." +core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec,Apache-2.0,"Copyright 2016-Present Datadog, Inc." +core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec,Apache-2.0,"Copyright 2016-Present Datadog, Inc." core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec,Apache-2.0,"Copyright 2016-Present Datadog, Inc." -core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/types,Apache-2.0,"Copyright 2016-Present Datadog, Inc." core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/ossec,Apache-2.0,"Copyright 2016-Present Datadog, Inc." -core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec,Apache-2.0,"Copyright 2016-Present Datadog, Inc." -core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec/types,Apache-2.0,"Copyright 2016-Present Datadog, Inc." +core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec,Apache-2.0,"Copyright 2016-Present Datadog, Inc." +core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace,Apache-2.0,"Copyright 2016-Present Datadog, Inc." +core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/usersec,Apache-2.0,"Copyright 2016-Present Datadog, Inc." +core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf,Apache-2.0,"Copyright 2016-Present Datadog, Inc." +core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions,Apache-2.0,"Copyright 2016-Present Datadog, Inc." +core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses,Apache-2.0,"Copyright 2016-Present Datadog, Inc." core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener,Apache-2.0,"Copyright 2016-Present Datadog, Inc." +core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/graphqlsec,Apache-2.0,"Copyright 2016-Present Datadog, Inc." +core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/grpcsec,Apache-2.0,"Copyright 2016-Present Datadog, Inc." core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec,Apache-2.0,"Copyright 2016-Present Datadog, Inc." core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/ossec,Apache-2.0,"Copyright 2016-Present Datadog, Inc." -core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec,Apache-2.0,"Copyright 2016-Present Datadog, Inc." core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sqlsec,Apache-2.0,"Copyright 2016-Present Datadog, Inc." -core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace,Apache-2.0,"Copyright 2016-Present Datadog, Inc." -core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/httptrace,Apache-2.0,"Copyright 2016-Present Datadog, Inc." +core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/trace,Apache-2.0,"Copyright 2016-Present Datadog, Inc." +core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/usersec,Apache-2.0,"Copyright 2016-Present Datadog, Inc." +core,gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/waf,Apache-2.0,"Copyright 2016-Present Datadog, Inc." 
core,gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants,Apache-2.0,"Copyright 2016-Present Datadog, Inc." +core,gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils,Apache-2.0,"Copyright 2016-Present Datadog, Inc." core,gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams,Apache-2.0,"Copyright 2016-Present Datadog, Inc." core,gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig,Apache-2.0,"Copyright 2016-Present Datadog, Inc." core,gopkg.in/DataDog/dd-trace-go.v1/internal/hostname,Apache-2.0,"Copyright 2016-Present Datadog, Inc." diff --git a/cmd/cluster-agent/api/server.go b/cmd/cluster-agent/api/server.go index 0b6ba72b63f08..350777aa8c8b0 100644 --- a/cmd/cluster-agent/api/server.go +++ b/cmd/cluster-agent/api/server.go @@ -123,14 +123,19 @@ func StartServer(ctx context.Context, w workloadmeta.Component, taggerComp tagge return struct{}{}, nil }) + maxMessageSize := cfg.GetInt("cluster_agent.cluster_tagger.grpc_max_message_size") opts := []grpc.ServerOption{ grpc.StreamInterceptor(grpc_auth.StreamServerInterceptor(authInterceptor)), grpc.UnaryInterceptor(grpc_auth.UnaryServerInterceptor(authInterceptor)), + grpc.MaxSendMsgSize(maxMessageSize), + grpc.MaxRecvMsgSize(maxMessageSize), } grpcSrv := grpc.NewServer(opts...) + // event size should be small enough to fit within the grpc max message size + maxEventSize := maxMessageSize / 2 pb.RegisterAgentSecureServer(grpcSrv, &serverSecure{ - taggerServer: taggerserver.NewServer(taggerComp), + taggerServer: taggerserver.NewServer(taggerComp, maxEventSize), }) timeout := pkgconfigsetup.Datadog().GetDuration("cluster_agent.server.idle_timeout_seconds") * time.Second diff --git a/cmd/serverless-init/log/log.go b/cmd/serverless-init/log/log.go index 1e1d42eb0ce87..66f03129e4e8c 100644 --- a/cmd/serverless-init/log/log.go +++ b/cmd/serverless-init/log/log.go @@ -58,19 +58,20 @@ func SetupLogAgent(conf *Config, tags map[string]string, tagger tagger.Component tagsArray := serverlessTag.MapToArray(tags) - addFileTailing(logsAgent, tagsArray) + addFileTailing(logsAgent, conf.source, tagsArray) serverlessLogs.SetLogsTags(tagsArray) return logsAgent } -func addFileTailing(logsAgent logsAgent.ServerlessLogsAgent, tags []string) { +func addFileTailing(logsAgent logsAgent.ServerlessLogsAgent, source string, tags []string) { if filePath, set := os.LookupEnv(envVarTailFilePath); set { src := sources.NewLogSource("serverless-file-tail", &logConfig.LogsConfig{ Type: logConfig.FileType, Path: filePath, Service: os.Getenv("DD_SERVICE"), Tags: tags, + Source: source, }) logsAgent.GetSources().AddSource(src) } diff --git a/cmd/serverless/dependencies_linux_amd64.txt b/cmd/serverless/dependencies_linux_amd64.txt index 90772c5f91fa6..25817824681a6 100644 --- a/cmd/serverless/dependencies_linux_amd64.txt +++ b/cmd/serverless/dependencies_linux_amd64.txt @@ -433,6 +433,7 @@ github.com/coreos/go-systemd/v22/dbus github.com/davecgh/go-spew/spew github.com/docker/go-units github.com/dustin/go-humanize +github.com/eapache/queue/v2 github.com/ebitengine/purego github.com/ebitengine/purego/internal/cgo github.com/ebitengine/purego/internal/strings @@ -461,6 +462,9 @@ github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities github.com/hashicorp/errwrap github.com/hashicorp/go-multierror +github.com/hashicorp/go-secure-stdlib/parseutil +github.com/hashicorp/go-secure-stdlib/strutil +github.com/hashicorp/go-sockaddr github.com/hashicorp/go-version github.com/hashicorp/golang-lru/v2 
github.com/hashicorp/golang-lru/v2/internal @@ -532,6 +536,7 @@ github.com/richardartoul/molecule/src/protowire github.com/rivo/uniseg github.com/rs/cors github.com/rs/cors/internal +github.com/ryanuber/go-glob github.com/secure-systems-lab/go-securesystemslib/cjson github.com/shirou/gopsutil/v3/common github.com/shirou/gopsutil/v3/cpu @@ -934,6 +939,7 @@ google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/structpb google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb +gopkg.in/DataDog/dd-trace-go.v1/appsec/events gopkg.in/DataDog/dd-trace-go.v1/datastreams/options gopkg.in/DataDog/dd-trace-go.v1/ddtrace gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext @@ -943,7 +949,27 @@ gopkg.in/DataDog/dd-trace-go.v1/internal gopkg.in/DataDog/dd-trace-go.v1/internal/appsec gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/ossec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/usersec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/graphqlsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/grpcsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/ossec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sqlsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/trace +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/usersec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/waf gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants +gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig gopkg.in/DataDog/dd-trace-go.v1/internal/hostname @@ -961,6 +987,7 @@ gopkg.in/DataDog/dd-trace-go.v1/internal/orchestrion gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames +gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof gopkg.in/DataDog/dd-trace-go.v1/internal/version diff --git a/cmd/serverless/dependencies_linux_arm64.txt b/cmd/serverless/dependencies_linux_arm64.txt index 6734f6d29fec8..7740ac862f6ef 100644 --- a/cmd/serverless/dependencies_linux_arm64.txt +++ b/cmd/serverless/dependencies_linux_arm64.txt @@ -433,6 +433,7 @@ github.com/coreos/go-systemd/v22/dbus github.com/davecgh/go-spew/spew github.com/docker/go-units github.com/dustin/go-humanize +github.com/eapache/queue/v2 github.com/ebitengine/purego github.com/ebitengine/purego/internal/cgo github.com/ebitengine/purego/internal/strings @@ -461,6 +462,9 @@ github.com/grpc-ecosystem/grpc-gateway/v2/runtime 
github.com/grpc-ecosystem/grpc-gateway/v2/utilities github.com/hashicorp/errwrap github.com/hashicorp/go-multierror +github.com/hashicorp/go-secure-stdlib/parseutil +github.com/hashicorp/go-secure-stdlib/strutil +github.com/hashicorp/go-sockaddr github.com/hashicorp/go-version github.com/hashicorp/golang-lru/v2 github.com/hashicorp/golang-lru/v2/internal @@ -531,6 +535,7 @@ github.com/richardartoul/molecule/src/protowire github.com/rivo/uniseg github.com/rs/cors github.com/rs/cors/internal +github.com/ryanuber/go-glob github.com/secure-systems-lab/go-securesystemslib/cjson github.com/shirou/gopsutil/v3/common github.com/shirou/gopsutil/v3/cpu @@ -933,6 +938,7 @@ google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/structpb google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb +gopkg.in/DataDog/dd-trace-go.v1/appsec/events gopkg.in/DataDog/dd-trace-go.v1/datastreams/options gopkg.in/DataDog/dd-trace-go.v1/ddtrace gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext @@ -942,7 +948,27 @@ gopkg.in/DataDog/dd-trace-go.v1/internal gopkg.in/DataDog/dd-trace-go.v1/internal/appsec gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/ossec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/usersec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/graphqlsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/grpcsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/ossec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sqlsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/trace +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/usersec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/waf gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants +gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig gopkg.in/DataDog/dd-trace-go.v1/internal/hostname @@ -960,6 +986,7 @@ gopkg.in/DataDog/dd-trace-go.v1/internal/orchestrion gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames +gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof gopkg.in/DataDog/dd-trace-go.v1/internal/version diff --git a/cmd/system-probe/config/config.go b/cmd/system-probe/config/config.go index 1ae05b24a411b..60fd9b679609b 100644 --- a/cmd/system-probe/config/config.go +++ b/cmd/system-probe/config/config.go @@ -148,7 +148,7 @@ func load() (*types.Config, error) { if cfg.GetBool(diNS("enabled")) { 
c.EnabledModules[DynamicInstrumentationModule] = struct{}{} } - if cfg.GetBool(nskey("ebpf_check", "enabled")) { + if cfg.GetBool(NSkey("ebpf_check", "enabled")) { c.EnabledModules[EBPFModule] = struct{}{} } if cfg.GetBool("system_probe_config.language_detection.enabled") { diff --git a/cmd/system-probe/config/ns.go b/cmd/system-probe/config/ns.go index 913b86b7915f5..59f519ddb26d9 100644 --- a/cmd/system-probe/config/ns.go +++ b/cmd/system-probe/config/ns.go @@ -9,64 +9,70 @@ import "strings" // spNS adds `system_probe_config` namespace to configuration key func spNS(k ...string) string { - return nskey("system_probe_config", k...) + return NSkey("system_probe_config", k...) } // netNS adds `network_config` namespace to configuration key func netNS(k ...string) string { - return nskey("network_config", k...) + return NSkey("network_config", k...) } // smNS adds `service_monitoring_config` namespace to configuration key func smNS(k ...string) string { - return nskey("service_monitoring_config", k...) + return NSkey("service_monitoring_config", k...) } // ccmNS adds `ccm_network_config` namespace to a configuration key func ccmNS(k ...string) string { - return nskey("ccm_network_config", k...) + return NSkey("ccm_network_config", k...) } // diNS adds `dynamic_instrumentation` namespace to configuration key func diNS(k ...string) string { - return nskey("dynamic_instrumentation", k...) + return NSkey("dynamic_instrumentation", k...) } // secNS adds `runtime_security_config` namespace to configuration key func secNS(k ...string) string { - return nskey("runtime_security_config", k...) + return NSkey("runtime_security_config", k...) } // evNS adds `event_monitoring_config` namespace to configuration key func evNS(k ...string) string { - return nskey("event_monitoring_config", k...) + return NSkey("event_monitoring_config", k...) } -func nskey(ns string, pieces ...string) string { +// NSkey returns a full key path in the config file by joining the given namespace and the rest of the path fragments +func NSkey(ns string, pieces ...string) string { return strings.Join(append([]string{ns}, pieces...), ".") } +// FullKeyPath returns a full key path in the config file by joining multiple path fragments +func FullKeyPath(pieces ...string) string { + return strings.Join(pieces, ".") +} + // wcdNS adds `windows_crash_detection` namespace to config key func wcdNS(k ...string) string { - return nskey("windows_crash_detection", k...) + return NSkey("windows_crash_detection", k...) } // pngNS adds `ping` namespace to config key func pngNS(k ...string) string { - return nskey("ping", k...) + return NSkey("ping", k...) } // tracerouteNS adds `traceroute` namespace to config key func tracerouteNS(k ...string) string { - return nskey("traceroute", k...) + return NSkey("traceroute", k...) } // discoveryNS adds `discovery` namespace to config key func discoveryNS(k ...string) string { - return nskey("discovery", k...) + return NSkey("discovery", k...) } // gpuNS adds `gpu_monitoring` namespace to config key func gpuNS(k ...string) string { - return nskey("gpu_monitoring", k...) + return NSkey("gpu_monitoring", k...) }
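The `nskey` → `NSkey` rename exports the key-joining helper, and `FullKeyPath` is added as a namespace-less variant. Below is a minimal, self-contained sketch of what these helpers produce; the helper bodies are copied from the ns.go hunk above, while the `main` function and its example fragments are illustrative only.

```go
package main

import (
	"fmt"
	"strings"
)

// NSkey, as exported above: prefixes the path fragments with a namespace and
// joins everything with dots.
func NSkey(ns string, pieces ...string) string {
	return strings.Join(append([]string{ns}, pieces...), ".")
}

// FullKeyPath, as added above: joins fragments without a dedicated namespace argument.
func FullKeyPath(pieces ...string) string {
	return strings.Join(pieces, ".")
}

func main() {
	// Mirrors the call site in load() for the ebpf_check module toggle.
	fmt.Println(NSkey("ebpf_check", "enabled")) // ebpf_check.enabled

	// Illustrative fragments; any dotted config path can be built this way.
	fmt.Println(FullKeyPath("system_probe_config", "language_detection", "enabled"))
}
```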
diff --git a/cmd/system-probe/modules/gpu_monitoring.go b/cmd/system-probe/modules/gpu.go similarity index 72% rename from cmd/system-probe/modules/gpu_monitoring.go rename to cmd/system-probe/modules/gpu.go index 5db344197a37a..3cd54e27293e5 100644 --- a/cmd/system-probe/modules/gpu_monitoring.go +++ b/cmd/system-probe/modules/gpu.go @@ -12,6 +12,7 @@ import ( "net/http" "time" + "github.com/NVIDIA/go-nvml/pkg/nvml" "go.uber.org/atomic" "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" @@ -23,14 +24,28 @@ import ( ) var _ module.Module = &GPUMonitoringModule{} -var gpuMonitoringConfigNamespaces = []string{gpu.GPUConfigNS} +var gpuMonitoringConfigNamespaces = []string{gpu.GPUNS} // GPUMonitoring Factory var GPUMonitoring = module.Factory{ Name: config.GPUMonitoringModule, ConfigNamespaces: gpuMonitoringConfigNamespaces, - Fn: func(_ *sysconfigtypes.Config, _ module.FactoryDependencies) (module.Module, error) { - t, err := gpu.NewProbe(gpu.NewConfig(), nil) + Fn: func(_ *sysconfigtypes.Config, deps module.FactoryDependencies) (module.Module, error) { + + c := gpu.NewConfig() + probeDeps := gpu.ProbeDependencies{ + Telemetry: deps.Telemetry, + // if the config parameter doesn't exist or is an empty string, the default value is used, as defined in the go-nvml library + // (https://github.com/NVIDIA/go-nvml/blob/main/pkg/nvml/lib.go#L30) + NvmlLib: nvml.New(nvml.WithLibraryPath(c.NVMLLibraryPath)), + } + + ret := probeDeps.NvmlLib.Init() + if ret != nvml.SUCCESS && ret != nvml.ERROR_ALREADY_INITIALIZED { + return nil, fmt.Errorf("unable to initialize NVML library: %v", ret) + } + + t, err := gpu.NewProbe(c, probeDeps) if err != nil { return nil, fmt.Errorf("unable to start GPU monitoring: %w", err) } diff --git a/comp/api/api/apiimpl/api.go b/comp/api/api/apiimpl/api.go index 88e51adce492e..c81ccd987c1ba 100644 --- a/comp/api/api/apiimpl/api.go +++ b/comp/api/api/apiimpl/api.go @@ -17,6 +17,7 @@ import ( "github.com/DataDog/datadog-agent/comp/api/authtoken" "github.com/DataDog/datadog-agent/comp/collector/collector" "github.com/DataDog/datadog-agent/comp/core/autodiscovery" + "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/telemetry" @@ -40,6 +41,7 @@ func Module() fxutil.Module { type apiServer struct { dogstatsdServer dogstatsdServer.Component capture replay.Component + cfg config.Component pidMap pidmap.Component secretResolver secrets.Component rcService optional.Option[rcservice.Component] @@ -69,6 +71,7 @@ type dependencies struct { RcServiceMRF optional.Option[rcservicemrf.Component] AuthToken authtoken.Component Tagger tagger.Component + Cfg config.Component AutoConfig autodiscovery.Component LogsAgentComp optional.Option[logsAgent.Component] WorkloadMeta workloadmeta.Component @@ -91,6 +94,7 @@ func newAPIServer(deps dependencies) api.Component { rcServiceMRF: deps.RcServiceMRF, authToken: deps.AuthToken, taggerComp: deps.Tagger, + cfg: deps.Cfg, autoConfig: deps.AutoConfig, logsAgentComp: deps.LogsAgentComp, wmeta: deps.WorkloadMeta, diff --git a/comp/api/api/apiimpl/server.go b/comp/api/api/apiimpl/server.go index d1a3105ee5d3d..b4d42f1178218 100644 --- a/comp/api/api/apiimpl/server.go +++ b/comp/api/api/apiimpl/server.go @@ -79,6 +79,7 @@ func (server *apiServer) startServers() error { tlsConfig(), tlsCertPool, tmf, + server.cfg, ); err != nil { return fmt.Errorf("unable to start CMD API server: %v", err) } diff --git
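The module factory now receives its dependencies explicitly and initializes NVML up front instead of passing nil into the probe. Here is a sketch of that bootstrap pattern in isolation, assuming the go-nvml calls used in the hunk above (`nvml.New`, `nvml.WithLibraryPath`, `Init`); the real factory also wires telemetry and constructs the probe.

```go
package main

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

// initNVML mirrors the factory's bootstrap: build a library handle with an
// optional override path and tolerate double initialization.
func initNVML(libraryPath string) (nvml.Interface, error) {
	// An empty libraryPath falls back to go-nvml's built-in default.
	lib := nvml.New(nvml.WithLibraryPath(libraryPath))

	// ERROR_ALREADY_INITIALIZED is accepted so an already-loaded library
	// handle can be reused by a second caller.
	if ret := lib.Init(); ret != nvml.SUCCESS && ret != nvml.ERROR_ALREADY_INITIALIZED {
		return nil, fmt.Errorf("unable to initialize NVML library: %v", ret)
	}
	return lib, nil
}

func main() {
	if _, err := initNVML(""); err != nil {
		fmt.Println(err) // expected on machines without an NVIDIA driver
	}
}
```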
a/comp/api/api/apiimpl/server_cmd.go b/comp/api/api/apiimpl/server_cmd.go index 56215842c06d0..75db2d6815de3 100644 --- a/comp/api/api/apiimpl/server_cmd.go +++ b/comp/api/api/apiimpl/server_cmd.go @@ -22,6 +22,7 @@ import ( "github.com/DataDog/datadog-agent/comp/api/api/apiimpl/internal/agent" "github.com/DataDog/datadog-agent/comp/api/api/apiimpl/internal/check" "github.com/DataDog/datadog-agent/comp/api/api/apiimpl/observability" + "github.com/DataDog/datadog-agent/comp/core/config" taggerserver "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/server" workloadmetaServer "github.com/DataDog/datadog-agent/comp/core/workloadmeta/server" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" @@ -37,6 +38,7 @@ func (server *apiServer) startCMDServer( tlsConfig *tls.Config, tlsCertPool *x509.CertPool, tmf observability.TelemetryMiddlewareFactory, + cfg config.Component, ) (err error) { // get the transport we're going to use under HTTP server.cmdListener, err = getListener(cmdAddr) @@ -48,18 +50,25 @@ func (server *apiServer) startCMDServer( // gRPC server authInterceptor := grpcutil.AuthInterceptor(parseToken) + + maxMessageSize := cfg.GetInt("cluster_agent.cluster_tagger.grpc_max_message_size") + opts := []grpc.ServerOption{ grpc.Creds(credentials.NewClientTLSFromCert(tlsCertPool, cmdAddr)), grpc.StreamInterceptor(grpc_auth.StreamServerInterceptor(authInterceptor)), grpc.UnaryInterceptor(grpc_auth.UnaryServerInterceptor(authInterceptor)), + grpc.MaxRecvMsgSize(maxMessageSize), + grpc.MaxSendMsgSize(maxMessageSize), } + // event size should be small enough to fit within the grpc max message size + maxEventSize := maxMessageSize / 2 s := grpc.NewServer(opts...) pb.RegisterAgentServer(s, &grpcServer{}) pb.RegisterAgentSecureServer(s, &serverSecure{ configService: server.rcService, configServiceMRF: server.rcServiceMRF, - taggerServer: taggerserver.NewServer(server.taggerComp), + taggerServer: taggerserver.NewServer(server.taggerComp, maxEventSize), taggerComp: server.taggerComp, // TODO(components): decide if workloadmetaServer should be componentized itself workloadmetaServer: workloadmetaServer.NewServer(server.wmeta), diff --git a/comp/core/autodiscovery/autodiscoveryimpl/common_test.go b/comp/core/autodiscovery/autodiscoveryimpl/common_test.go index b6cdf32759ef3..277abef2c2308 100644 --- a/comp/core/autodiscovery/autodiscoveryimpl/common_test.go +++ b/comp/core/autodiscovery/autodiscoveryimpl/common_test.go @@ -54,6 +54,11 @@ func (s *dummyService) GetTags() ([]string, error) { return nil, nil } +// GetTagsWithCardinality returns the tags for this service +func (s *dummyService) GetTagsWithCardinality(_ string) ([]string, error) { + return s.GetTags() +} + // GetPid return a dummy pid func (s *dummyService) GetPid(context.Context) (int, error) { return s.Pid, nil diff --git a/comp/core/autodiscovery/common/utils/annotations.go b/comp/core/autodiscovery/common/utils/annotations.go index 0dabffa498f6d..606e7264c16fc 100644 --- a/comp/core/autodiscovery/common/utils/annotations.go +++ b/comp/core/autodiscovery/common/utils/annotations.go @@ -23,6 +23,7 @@ const ( logsConfigPath = "logs" checksPath = "checks" checkIDPath = "check.id" + checkTagCardinality = "check_tag_cardinality" ) // ExtractTemplatesFromMap looks for autodiscovery configurations in a given @@ -76,7 +77,10 @@ func extractCheckTemplatesFromMap(key string, input map[string]string, prefix st } // ParseBool returns `true` only on success cases ignoreAdTags, _ := 
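The CMD server change above reads `cluster_agent.cluster_tagger.grpc_max_message_size` once, applies it symmetrically to the receive and send limits, and budgets half of it per streamed event chunk so a response plus framing stays under the transport cap. A minimal sketch of that option wiring with plain grpc-go, leaving out the TLS credentials and auth interceptors of the real server:

```go
package main

import "google.golang.org/grpc"

// newServerWithLimits shows the size wiring in isolation: one configured byte
// budget caps both directions, and streamed events are chunked to half of it.
func newServerWithLimits(maxMessageSize int) (*grpc.Server, int) {
	opts := []grpc.ServerOption{
		grpc.MaxRecvMsgSize(maxMessageSize),
		grpc.MaxSendMsgSize(maxMessageSize),
	}
	// event size should be small enough to fit within the grpc max message size
	maxEventSize := maxMessageSize / 2
	return grpc.NewServer(opts...), maxEventSize
}

func main() {
	s, maxEventSize := newServerWithLimits(4 * 1024 * 1024) // 4 MiB is an illustrative value
	defer s.Stop()
	_ = maxEventSize // handed to the tagger server for chunking, as above
}
```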
strconv.ParseBool(input[prefix+ignoreAutodiscoveryTags]) - return BuildTemplates(key, checkNames, initConfigs, instances, ignoreAdTags), nil + + cardinality := input[prefix+checkTagCardinality] + + return BuildTemplates(key, checkNames, initConfigs, instances, ignoreAdTags, cardinality), nil } // extractLogsTemplatesFromMap returns the logs configuration from a given map, @@ -166,8 +170,8 @@ func ParseJSONValue(value string) ([][]integration.Data, error) { // BuildTemplates returns check configurations configured according to the // passed in AD identifier, check names, init, instance configs and their -// `ignoreAutoDiscoveryTags` field. -func BuildTemplates(adID string, checkNames []string, initConfigs, instances [][]integration.Data, ignoreAutodiscoveryTags bool) []integration.Config { +// `ignoreAutoDiscoveryTags`, `CheckTagCardinality` fields. +func BuildTemplates(adID string, checkNames []string, initConfigs, instances [][]integration.Data, ignoreAutodiscoveryTags bool, checkCard string) []integration.Config { templates := make([]integration.Config, 0) // sanity checks @@ -192,6 +196,7 @@ func BuildTemplates(adID string, checkNames []string, initConfigs, instances [][ Instances: []integration.Data{instance}, ADIdentifiers: []string{adID}, IgnoreAutodiscoveryTags: ignoreAutodiscoveryTags, + CheckTagCardinality: checkCard, }) } } diff --git a/comp/core/autodiscovery/common/utils/annotations_test.go b/comp/core/autodiscovery/common/utils/annotations_test.go index 9d554dc882444..985a848335f9e 100644 --- a/comp/core/autodiscovery/common/utils/annotations_test.go +++ b/comp/core/autodiscovery/common/utils/annotations_test.go @@ -344,12 +344,13 @@ func TestParseCheckNames(t *testing.T) { func TestBuildTemplates(t *testing.T) { key := "id" tests := []struct { - name string - inputCheckNames []string - inputInitConfig [][]integration.Data - inputInstances [][]integration.Data - expectedConfigs []integration.Config - ignoreAdTags bool + name string + inputCheckNames []string + inputInitConfig [][]integration.Data + inputInstances [][]integration.Data + expectedConfigs []integration.Config + ignoreAdTags bool + checkTagCardinality string }{ { name: "wrong number of checkNames", @@ -434,10 +435,40 @@ func TestBuildTemplates(t *testing.T) { }, }, }, + { + name: "valid inputs with list and checkCardinality", + inputCheckNames: []string{"a", "b"}, + inputInitConfig: [][]integration.Data{{integration.Data("{\"test\": 1}")}, {integration.Data("{}")}}, + inputInstances: [][]integration.Data{{integration.Data("{\"foo\": 1}"), integration.Data("{\"foo\": 2}")}, {integration.Data("{1:2}")}}, + checkTagCardinality: "low", + expectedConfigs: []integration.Config{ + { + Name: "a", + ADIdentifiers: []string{key}, + InitConfig: integration.Data("{\"test\": 1}"), + Instances: []integration.Data{integration.Data("{\"foo\": 1}")}, + CheckTagCardinality: "low", + }, + { + Name: "a", + ADIdentifiers: []string{key}, + InitConfig: integration.Data("{\"test\": 1}"), + Instances: []integration.Data{integration.Data("{\"foo\": 2}")}, + CheckTagCardinality: "low", + }, + { + Name: "b", + ADIdentifiers: []string{key}, + InitConfig: integration.Data("{}"), + Instances: []integration.Data{integration.Data("{1:2}")}, + CheckTagCardinality: "low", + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.expectedConfigs, BuildTemplates(key, tt.inputCheckNames, tt.inputInitConfig, tt.inputInstances, tt.ignoreAdTags)) + assert.Equal(t, tt.expectedConfigs, BuildTemplates(key, 
tt.inputCheckNames, tt.inputInitConfig, tt.inputInstances, tt.ignoreAdTags, tt.checkTagCardinality)) }) } } diff --git a/comp/core/autodiscovery/common/utils/pod_annotations.go b/comp/core/autodiscovery/common/utils/pod_annotations.go index 2db5b0c323305..6b1fc5233bf22 100644 --- a/comp/core/autodiscovery/common/utils/pod_annotations.go +++ b/comp/core/autodiscovery/common/utils/pod_annotations.go @@ -60,6 +60,7 @@ func parseChecksJSON(adIdentifier string, checksJSON string) ([]integration.Conf Instances []interface{} `json:"instances"` Logs json.RawMessage `json:"logs"` IgnoreAutodiscoveryTags bool `json:"ignore_autodiscovery_tags"` + CheckTagCardinality string `json:"check_tag_cardinality"` } err := json.Unmarshal([]byte(checksJSON), &namedChecks) @@ -84,6 +85,8 @@ func parseChecksJSON(adIdentifier string, checksJSON string) ([]integration.Conf IgnoreAutodiscoveryTags: config.IgnoreAutodiscoveryTags, } + c.CheckTagCardinality = config.CheckTagCardinality + if len(config.Logs) > 0 { c.LogsConfig = integration.Data(config.Logs) } diff --git a/comp/core/autodiscovery/common/utils/pod_annotations_test.go b/comp/core/autodiscovery/common/utils/pod_annotations_test.go index 7043e7c19e301..f044409ecb492 100644 --- a/comp/core/autodiscovery/common/utils/pod_annotations_test.go +++ b/comp/core/autodiscovery/common/utils/pod_annotations_test.go @@ -137,6 +137,32 @@ func TestExtractTemplatesFromAnnotations(t *testing.T) { }, }, }, + { + name: "Nominal case with two templates and check tag cardinality", + annotations: map[string]string{ + "ad.datadoghq.com/foobar.check_names": "[\"apache\",\"http_check\"]", + "ad.datadoghq.com/foobar.init_configs": "[{},{}]", + "ad.datadoghq.com/foobar.instances": "[{\"apache_status_url\":\"http://%%host%%/server-status?auto\"},{\"name\":\"My service\",\"timeout\":1,\"url\":\"http://%%host%%\"}]", + "ad.datadoghq.com/foobar.check_tag_cardinality": "low", + }, + adIdentifier: "foobar", + output: []integration.Config{ + { + Name: "apache", + Instances: []integration.Data{integration.Data("{\"apache_status_url\":\"http://%%host%%/server-status?auto\"}")}, + InitConfig: integration.Data("{}"), + ADIdentifiers: []string{adID}, + CheckTagCardinality: "low", + }, + { + Name: "http_check", + Instances: []integration.Data{integration.Data("{\"name\":\"My service\",\"timeout\":1,\"url\":\"http://%%host%%\"}")}, + InitConfig: integration.Data("{}"), + ADIdentifiers: []string{adID}, + CheckTagCardinality: "low", + }, + }, + }, { name: "Take one, ignore one", annotations: map[string]string{ diff --git a/comp/core/autodiscovery/configresolver/configresolver.go b/comp/core/autodiscovery/configresolver/configresolver.go index 41a97f90e44d7..27579bc7c2db9 100644 --- a/comp/core/autodiscovery/configresolver/configresolver.go +++ b/comp/core/autodiscovery/configresolver/configresolver.go @@ -93,7 +93,13 @@ func Resolve(tpl integration.Config, svc listeners.Service) (integration.Config, return resolvedConfig, errors.New("unable to resolve, service not ready") } - tags, err := svc.GetTags() + var tags []string + var err error + if tpl.CheckTagCardinality != "" { + tags, err = svc.GetTagsWithCardinality(tpl.CheckTagCardinality) + } else { + tags, err = svc.GetTags() + } if err != nil { return resolvedConfig, fmt.Errorf("couldn't get tags for service '%s', err: %w", svc.GetServiceID(), err) } diff --git a/comp/core/autodiscovery/configresolver/configresolver_test.go b/comp/core/autodiscovery/configresolver/configresolver_test.go index a19ab496428a7..6f4ab6ac75e68 100644 --- 
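Both annotation styles gain the override: the dedicated `ad.datadoghq.com/<container>.check_tag_cardinality` key exercised in the test above, and a `check_tag_cardinality` field inside the consolidated checks JSON handled by `parseChecksJSON`. A hedged sketch of decoding the JSON form follows; the struct here is a simplified stand-in, not the agent's full schema.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// namedCheck keeps only the fields relevant to the override; the real decoder
// also carries the check name, init config, and logs sections.
type namedCheck struct {
	Instances               []json.RawMessage `json:"instances"`
	IgnoreAutodiscoveryTags bool              `json:"ignore_autodiscovery_tags"`
	CheckTagCardinality     string            `json:"check_tag_cardinality"`
}

func main() {
	// Hypothetical payload for a single check definition.
	payload := `{"instances":[{"url":"http://%%host%%"}],"check_tag_cardinality":"low"}`

	var c namedCheck
	if err := json.Unmarshal([]byte(payload), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.CheckTagCardinality) // low
}
```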
a/comp/core/autodiscovery/configresolver/configresolver_test.go +++ b/comp/core/autodiscovery/configresolver/configresolver_test.go @@ -62,6 +62,11 @@ func (s *dummyService) GetTags() ([]string, error) { return []string{"foo:bar"}, nil } +// GetTagsWithCardinality returns the tags for this service +func (s *dummyService) GetTagsWithCardinality(_ string) ([]string, error) { + return s.GetTags() +} + // GetPid return a dummy pid func (s *dummyService) GetPid(context.Context) (int, error) { return s.Pid, nil diff --git a/comp/core/autodiscovery/integration/config.go b/comp/core/autodiscovery/integration/config.go index 8b7276c7dbbae..d4cca33c9d0e7 100644 --- a/comp/core/autodiscovery/integration/config.go +++ b/comp/core/autodiscovery/integration/config.go @@ -97,6 +97,9 @@ type Config struct { // IgnoreAutodiscoveryTags is used to ignore tags coming from autodiscovery IgnoreAutodiscoveryTags bool `json:"ignore_autodiscovery_tags"` // (include in digest: true) + // CheckTagCardinality is used to override the default tag cardinality in the agent configuration + CheckTagCardinality string `json:"check_tag_cardinality"` // (include in digest: false) + // MetricsExcluded is whether metrics collection is disabled (set by // container listeners only) MetricsExcluded bool `json:"metrics_excluded"` // (include in digest: false) diff --git a/comp/core/autodiscovery/listeners/cloudfoundry.go b/comp/core/autodiscovery/listeners/cloudfoundry.go index 99f8e1806ce70..d82436fba67bb 100644 --- a/comp/core/autodiscovery/listeners/cloudfoundry.go +++ b/comp/core/autodiscovery/listeners/cloudfoundry.go @@ -234,6 +234,11 @@ func (s *CloudFoundryService) GetTags() ([]string, error) { return s.tags, nil } +// GetTagsWithCardinality returns the tags with given cardinality. Not supported in CF +func (s *CloudFoundryService) GetTagsWithCardinality(cardinality string) ([]string, error) { + return s.GetTags() +} + // GetPid returns nil and an error because pids are currently not supported in CF func (s *CloudFoundryService) GetPid(context.Context) (int, error) { return -1, ErrNotSupported diff --git a/comp/core/autodiscovery/listeners/dbm_aurora.go b/comp/core/autodiscovery/listeners/dbm_aurora.go index f29727a9dc6ef..062a7f24a400d 100644 --- a/comp/core/autodiscovery/listeners/dbm_aurora.go +++ b/comp/core/autodiscovery/listeners/dbm_aurora.go @@ -239,6 +239,11 @@ func (d *DBMAuroraService) GetTags() ([]string, error) { return []string{}, nil } +// GetTagsWithCardinality returns the tags with given cardinality. Not supported in DBMAuroraService +func (d *DBMAuroraService) GetTagsWithCardinality(_ string) ([]string, error) { + return d.GetTags() +} + // GetPid returns nil and an error because pids are currently not supported func (d *DBMAuroraService) GetPid(context.Context) (int, error) { return -1, ErrNotSupported diff --git a/comp/core/autodiscovery/listeners/environment.go b/comp/core/autodiscovery/listeners/environment.go index a63d7430e6a25..f49152d95b106 100644 --- a/comp/core/autodiscovery/listeners/environment.go +++ b/comp/core/autodiscovery/listeners/environment.go @@ -109,6 +109,11 @@ func (s *EnvironmentService) GetTags() ([]string, error) { return nil, nil } +// GetTagsWithCardinality returns the tags with given cardinality. 
Not supported in EnvironmentService +func (s *EnvironmentService) GetTagsWithCardinality(_ string) ([]string, error) { + return s.GetTags() +} + // GetPid inspect the container and return its pid // Not relevant in this listener func (s *EnvironmentService) GetPid(context.Context) (int, error) { diff --git a/comp/core/autodiscovery/listeners/kube_endpoints.go b/comp/core/autodiscovery/listeners/kube_endpoints.go index a0ef296d318f7..c3ac90d6ea208 100644 --- a/comp/core/autodiscovery/listeners/kube_endpoints.go +++ b/comp/core/autodiscovery/listeners/kube_endpoints.go @@ -487,6 +487,11 @@ func (s *KubeEndpointService) GetTags() ([]string, error) { return s.tags, nil } +// GetTagsWithCardinality returns the tags with given cardinality. +func (s *KubeEndpointService) GetTagsWithCardinality(_ string) ([]string, error) { + return s.GetTags() +} + // GetHostname returns nil and an error because port is not supported in Kubelet func (s *KubeEndpointService) GetHostname(context.Context) (string, error) { return "", ErrNotSupported diff --git a/comp/core/autodiscovery/listeners/kube_services.go b/comp/core/autodiscovery/listeners/kube_services.go index 7119870471e99..c9019e21b8b30 100644 --- a/comp/core/autodiscovery/listeners/kube_services.go +++ b/comp/core/autodiscovery/listeners/kube_services.go @@ -368,6 +368,11 @@ func (s *KubeServiceService) GetTags() ([]string, error) { return s.tags, nil } +// GetTagsWithCardinality returns the tags with given cardinality. +func (s *KubeServiceService) GetTagsWithCardinality(_ string) ([]string, error) { + return s.GetTags() +} + // GetHostname returns nil and an error because port is not supported in Kubelet func (s *KubeServiceService) GetHostname(context.Context) (string, error) { return "", ErrNotSupported diff --git a/comp/core/autodiscovery/listeners/service.go b/comp/core/autodiscovery/listeners/service.go index 841af2456caa8..92c8d85f6ea58 100644 --- a/comp/core/autodiscovery/listeners/service.go +++ b/comp/core/autodiscovery/listeners/service.go @@ -14,6 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/tagger" taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" @@ -92,6 +93,16 @@ func (s *service) GetTags() ([]string, error) { return tagger.Tag(taggercommon.BuildTaggerEntityID(s.entity.GetID()).String(), tagger.ChecksCardinality()) } +// GetTagsWithCardinality returns the tags with given cardinality. +func (s *service) GetTagsWithCardinality(cardinality string) ([]string, error) { + checkCard, err := types.StringToTagCardinality(cardinality) + if err == nil { + return tagger.Tag(taggercommon.BuildTaggerEntityID(s.entity.GetID()).String(), checkCard) + } + log.Warnf("error converting cardinality %s to TagCardinality: %v", cardinality, err) + return s.GetTags() +} + // GetPid returns the process ID of the service. 
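The concrete `service` implementation above converts the string to a tagger cardinality and quietly degrades to the default `GetTags` path when the value is unrecognized, so a bad annotation never fails config resolution. A standalone sketch of that fallback shape, with hypothetical stand-ins for the tagger lookups and the usual low/orchestrator/high level names assumed:

```go
package main

import (
	"fmt"
	"strings"
)

// tagsAt and defaultTags are hypothetical stand-ins for the tagger lookups.
func tagsAt(cardinality string) []string { return []string{"cardinality:" + cardinality} }
func defaultTags() []string              { return []string{"foo:bar"} }

// tagsForCardinality sketches the fallback: parse the requested level, and on
// failure log and fall back to the default lookup instead of returning an error.
func tagsForCardinality(cardinality string) []string {
	switch strings.ToLower(cardinality) {
	case "low", "orchestrator", "high":
		return tagsAt(cardinality)
	default:
		fmt.Printf("error converting cardinality %s to TagCardinality, using defaults\n", cardinality)
		return defaultTags()
	}
}

func main() {
	fmt.Println(tagsForCardinality("low"))   // [cardinality:low]
	fmt.Println(tagsForCardinality("bogus")) // logs, then [foo:bar]
}
```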
func (s *service) GetPid(_ context.Context) (int, error) { return s.pid, nil diff --git a/comp/core/autodiscovery/listeners/snmp.go b/comp/core/autodiscovery/listeners/snmp.go index fa5fd2937bccc..dbc12b7ac4a1d 100644 --- a/comp/core/autodiscovery/listeners/snmp.go +++ b/comp/core/autodiscovery/listeners/snmp.go @@ -357,6 +357,11 @@ func (s *SNMPService) GetTags() ([]string, error) { return []string{}, nil } +// GetTagsWithCardinality returns the tags with given cardinality. +func (s *SNMPService) GetTagsWithCardinality(_ string) ([]string, error) { + return s.GetTags() +} + // GetPid returns nil and an error because pids are currently not supported func (s *SNMPService) GetPid(context.Context) (int, error) { return -1, ErrNotSupported diff --git a/comp/core/autodiscovery/listeners/staticconfig.go b/comp/core/autodiscovery/listeners/staticconfig.go index 0e537c49bf189..ad06ab28eb49f 100644 --- a/comp/core/autodiscovery/listeners/staticconfig.go +++ b/comp/core/autodiscovery/listeners/staticconfig.go @@ -92,6 +92,11 @@ func (s *StaticConfigService) GetTags() ([]string, error) { return nil, nil } +// GetTagsWithCardinality returns the tags with given cardinality. +func (s *StaticConfigService) GetTagsWithCardinality(_ string) ([]string, error) { + return s.GetTags() +} + // GetPid inspect the container and return its pid // Not relevant in this listener func (s *StaticConfigService) GetPid(context.Context) (int, error) { diff --git a/comp/core/autodiscovery/listeners/types.go b/comp/core/autodiscovery/listeners/types.go index 8718b46b72c85..bcdbe1aeebaeb 100644 --- a/comp/core/autodiscovery/listeners/types.go +++ b/comp/core/autodiscovery/listeners/types.go @@ -25,17 +25,18 @@ type ContainerPort struct { // It should be matched with a check template by the ConfigResolver using the // ADIdentifiers field. 
type Service interface { - Equal(Service) bool // compare two services - GetServiceID() string // unique service name - GetADIdentifiers(context.Context) ([]string, error) // identifiers on which templates will be matched - GetHosts(context.Context) (map[string]string, error) // network --> IP address - GetPorts(context.Context) ([]ContainerPort, error) // network ports - GetTags() ([]string, error) // tags - GetPid(context.Context) (int, error) // process identifier - GetHostname(context.Context) (string, error) // hostname.domainname for the entity - IsReady(context.Context) bool // is the service ready - HasFilter(containers.FilterType) bool // whether the service is excluded by metrics or logs exclusion config - GetExtraConfig(string) (string, error) // Extra configuration values + Equal(Service) bool // compare two services + GetServiceID() string // unique service name + GetADIdentifiers(context.Context) ([]string, error) // identifiers on which templates will be matched + GetHosts(context.Context) (map[string]string, error) // network --> IP address + GetPorts(context.Context) ([]ContainerPort, error) // network ports + GetTags() ([]string, error) // tags + GetTagsWithCardinality(cardinality string) ([]string, error) // tags with given cardinality + GetPid(context.Context) (int, error) // process identifier + GetHostname(context.Context) (string, error) // hostname.domainname for the entity + IsReady(context.Context) bool // is the service ready + HasFilter(containers.FilterType) bool // whether the service is excluded by metrics or logs exclusion config + GetExtraConfig(string) (string, error) // Extra configuration values // FilterTemplates filters the templates which will be resolved against // this service, in a map keyed by template digest. diff --git a/comp/core/autodiscovery/providers/config_reader.go b/comp/core/autodiscovery/providers/config_reader.go index 9f7b8cd13f8fd..f048953469884 100644 --- a/comp/core/autodiscovery/providers/config_reader.go +++ b/comp/core/autodiscovery/providers/config_reader.go @@ -35,6 +35,7 @@ type configFormat struct { Instances []integration.RawMap DockerImages []string `yaml:"docker_images"` // Only imported for deprecation warning IgnoreAutodiscoveryTags bool `yaml:"ignore_autodiscovery_tags"` // Use to ignore tags coming from autodiscovery + CheckTagCardinality string `yaml:"check_tag_cardinality"` // Use to set the tag cardinality override for the check } type configPkg struct { @@ -430,6 +431,9 @@ func GetIntegrationConfigFromFile(name, fpath string) (integration.Config, error // Copy ignore_autodiscovery_tags parameter conf.IgnoreAutodiscoveryTags = cf.IgnoreAutodiscoveryTags + // Copy check_tag_cardinality parameter + conf.CheckTagCardinality = cf.CheckTagCardinality + // DockerImages entry was found: we ignore it if no ADIdentifiers has been found if len(cf.DockerImages) > 0 && len(cf.ADIdentifiers) == 0 { return conf, errors.New("the 'docker_images' section is deprecated, please use 'ad_identifiers' instead") diff --git a/comp/core/autodiscovery/providers/consul.go b/comp/core/autodiscovery/providers/consul.go index 107694e975c59..a6b3816f7aba1 100644 --- a/comp/core/autodiscovery/providers/consul.go +++ b/comp/core/autodiscovery/providers/consul.go @@ -237,7 +237,7 @@ func (p *ConsulConfigProvider) getTemplates(ctx context.Context, key string) []i log.Errorf("Failed to retrieve instances at %s. 
Error: %s", instanceKey, err) return templates } - return utils.BuildTemplates(key, checkNames, initConfigs, instances, false) + return utils.BuildTemplates(key, checkNames, initConfigs, instances, false, "") } // getValue returns value, error diff --git a/comp/core/autodiscovery/providers/etcd.go b/comp/core/autodiscovery/providers/etcd.go index bb08f9306f9d4..e985fe31a9ed9 100644 --- a/comp/core/autodiscovery/providers/etcd.go +++ b/comp/core/autodiscovery/providers/etcd.go @@ -124,7 +124,7 @@ func (p *EtcdConfigProvider) getTemplates(ctx context.Context, key string) []int return nil } - return utils.BuildTemplates(key, checkNames, initConfigs, instances, false) + return utils.BuildTemplates(key, checkNames, initConfigs, instances, false, "") } // getEtcdValue retrieves content from etcd diff --git a/comp/core/autodiscovery/providers/kube_endpoints_file.go b/comp/core/autodiscovery/providers/kube_endpoints_file.go index c9d1ceb568800..469882c7af1ce 100644 --- a/comp/core/autodiscovery/providers/kube_endpoints_file.go +++ b/comp/core/autodiscovery/providers/kube_endpoints_file.go @@ -297,6 +297,7 @@ func endpointChecksFromTemplate(tpl integration.Config, ep *v1.Endpoints) []inte Provider: names.KubeEndpointsFile, Source: tpl.Source, IgnoreAutodiscoveryTags: tpl.IgnoreAutodiscoveryTags, + CheckTagCardinality: tpl.CheckTagCardinality, } utils.ResolveEndpointConfigAuto(config, ep.Subsets[i].Addresses[j]) diff --git a/comp/core/autodiscovery/providers/zookeeper.go b/comp/core/autodiscovery/providers/zookeeper.go index 7815bcd9011aa..999a5eadf041a 100644 --- a/comp/core/autodiscovery/providers/zookeeper.go +++ b/comp/core/autodiscovery/providers/zookeeper.go @@ -198,7 +198,7 @@ func (z *ZookeeperConfigProvider) getTemplates(key string) []integration.Config return nil } - return utils.BuildTemplates(key, checkNames, initConfigs, instances, false) + return utils.BuildTemplates(key, checkNames, initConfigs, instances, false, "") } func (z *ZookeeperConfigProvider) getJSONValue(key string) ([][]integration.Data, error) { diff --git a/comp/core/tagger/README.md b/comp/core/tagger/README.md index 88ad5d8229623..774a2fcaeecf2 100644 --- a/comp/core/tagger/README.md +++ b/comp/core/tagger/README.md @@ -53,7 +53,6 @@ Tagger entities are identified by a string-typed ID, with one of the following f | workloadmeta.KindContainer | `container_id://` | | workloadmeta.KindContainerImageMetadata | `container_image_metadata://` | | workloadmeta.KindECSTask | `ecs_task://` | -| workloadmeta.KindHost | `host://` | | workloadmeta.KindKubernetesDeployment | `deployment:///` | | workloadmeta.KindKubernetesMetadata | `kubernetes_metadata://///` (`` is empty in cluster-scoped objects) | | workloadmeta.KindKubernetesPod | `kubernetes_pod_uid://` | diff --git a/comp/core/tagger/common/entity_id_builder.go b/comp/core/tagger/common/entity_id_builder.go index 93774664bc7bf..798faa8df3fc0 100644 --- a/comp/core/tagger/common/entity_id_builder.go +++ b/comp/core/tagger/common/entity_id_builder.go @@ -27,8 +27,6 @@ func BuildTaggerEntityID(entityID workloadmeta.EntityID) types.EntityID { return types.NewEntityID(types.Process, entityID.ID) case workloadmeta.KindKubernetesDeployment: return types.NewEntityID(types.KubernetesDeployment, entityID.ID) - case workloadmeta.KindHost: - return types.NewEntityID(types.Host, entityID.ID) case workloadmeta.KindKubernetesMetadata: return types.NewEntityID(types.KubernetesMetadata, entityID.ID) default: diff --git a/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go 
b/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go index d291d5228767a..5e8bd77951d3a 100644 --- a/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go +++ b/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go @@ -142,8 +142,6 @@ func (c *WorkloadMetaCollector) processEvents(evBundle workloadmeta.EventBundle) tagInfos = append(tagInfos, c.handleECSTask(ev)...) case workloadmeta.KindContainerImageMetadata: tagInfos = append(tagInfos, c.handleContainerImage(ev)...) - case workloadmeta.KindHost: - tagInfos = append(tagInfos, c.handleHostTags(ev)...) case workloadmeta.KindKubernetesMetadata: tagInfos = append(tagInfos, c.handleKubeMetadata(ev)...) case workloadmeta.KindProcess: @@ -199,7 +197,6 @@ func (c *WorkloadMetaCollector) handleContainer(ev workloadmeta.Event) []*types. tagList.AddLow(tags.ShortImage, image.ShortName) tagList.AddLow(tags.ImageTag, image.Tag) tagList.AddLow(tags.ImageID, image.ID) - tagList.AddLow(tags.KubeGPUType, container.Resources.GPUType) if container.Runtime == workloadmeta.ContainerRuntimeDocker { if image.Tag != "" { @@ -231,6 +228,11 @@ func (c *WorkloadMetaCollector) handleContainer(ev workloadmeta.Event) []*types. tagList.AddLow(tag, value) } + // gpu tags from container resource requests + for _, gpuVendor := range container.Resources.GPUVendorList { + tagList.AddLow(tags.KubeGPUVendor, gpuVendor) + } + low, orch, high, standard := tagList.Compute() return []*types.TagInfo{ { @@ -294,17 +296,6 @@ func (c *WorkloadMetaCollector) handleContainerImage(ev workloadmeta.Event) []*t } } -func (c *WorkloadMetaCollector) handleHostTags(ev workloadmeta.Event) []*types.TagInfo { - hostTags := ev.Entity.(*workloadmeta.HostTags) - return []*types.TagInfo{ - { - Source: hostSource, - EntityID: types.NewEntityID("internal", "global-entity-id"), - LowCardTags: hostTags.HostTags, - }, - } -} - func (c *WorkloadMetaCollector) labelsToTags(labels map[string]string, tags *taglist.TagList) { // standard tags from labels c.extractFromMapWithFn(labels, standardDockerLabels, tags.AddStandard) @@ -357,6 +348,11 @@ func (c *WorkloadMetaCollector) extractTagsFromPodEntity(pod *workloadmeta.Kuber k8smetadata.AddMetadataAsTags(name, value, c.k8sResourcesAnnotationsAsTags["namespaces"], c.globK8sResourcesAnnotations["namespaces"], tagList) } + // gpu requested vendor as tags + for _, gpuVendor := range pod.GPUVendorList { + tagList.AddLow(tags.KubeGPUVendor, gpuVendor) + } + kubeServiceDisabled := false for _, disabledTag := range pkgconfigsetup.Datadog().GetStringSlice("kubernetes_ad_tags_disabled") { if disabledTag == "kube_service" { diff --git a/comp/core/tagger/taggerimpl/collectors/workloadmeta_main.go b/comp/core/tagger/taggerimpl/collectors/workloadmeta_main.go index fe561a0ca3481..23c5973e363e4 100644 --- a/comp/core/tagger/taggerimpl/collectors/workloadmeta_main.go +++ b/comp/core/tagger/taggerimpl/collectors/workloadmeta_main.go @@ -34,7 +34,6 @@ const ( containerSource = workloadmetaCollectorName + "-" + string(workloadmeta.KindContainer) containerImageSource = workloadmetaCollectorName + "-" + string(workloadmeta.KindContainerImageMetadata) processSource = workloadmetaCollectorName + "-" + string(workloadmeta.KindProcess) - hostSource = workloadmetaCollectorName + "-" + string(workloadmeta.KindHost) kubeMetadataSource = workloadmetaCollectorName + "-" + string(workloadmeta.KindKubernetesMetadata) deploymentSource = workloadmetaCollectorName + "-" + string(workloadmeta.KindKubernetesDeployment) diff --git 
a/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go b/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go index 49078f94ac2c7..830176d4abbc7 100644 --- a/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go +++ b/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go @@ -844,6 +844,60 @@ func TestHandleKubePod(t *testing.T) { }, }, }, + { + name: "pod with containers requesting gpu resources", + pod: workloadmeta.KubernetesPod{ + EntityID: podEntityID, + EntityMeta: workloadmeta.EntityMeta{ + Name: podName, + Namespace: podNamespace, + }, + GPUVendorList: []string{"nvidia"}, + Containers: []workloadmeta.OrchestratorContainer{ + { + ID: fullyFleshedContainerID, + Name: containerName, + Image: image, + }, + }, + }, + expected: []*types.TagInfo{ + { + Source: podSource, + EntityID: podTaggerEntityID, + HighCardTags: []string{}, + OrchestratorCardTags: []string{ + fmt.Sprintf("pod_name:%s", podName), + }, + LowCardTags: []string{ + fmt.Sprintf("kube_namespace:%s", podNamespace), + "gpu_vendor:nvidia", + }, + StandardTags: []string{}, + }, + { + Source: podSource, + EntityID: fullyFleshedContainerTaggerEntityID, + HighCardTags: []string{ + fmt.Sprintf("container_id:%s", fullyFleshedContainerID), + fmt.Sprintf("display_container_name:%s_%s", runtimeContainerName, podName), + }, + OrchestratorCardTags: []string{ + fmt.Sprintf("pod_name:%s", podName), + }, + LowCardTags: append([]string{ + fmt.Sprintf("kube_namespace:%s", podNamespace), + fmt.Sprintf("kube_container_name:%s", containerName), + "image_id:datadog/agent@sha256:a63d3f66fb2f69d955d4f2ca0b229385537a77872ffc04290acae65aed5317d2", + "image_name:datadog/agent", + "image_tag:latest", + "short_image:agent", + "gpu_vendor:nvidia", + }, standardTags...), + StandardTags: standardTags, + }, + }, + }, } for _, tt := range tests { @@ -2072,7 +2126,7 @@ func TestHandleContainer(t *testing.T) { Name: containerName, }, Resources: workloadmeta.ContainerResources{ - GPUType: "nvidia", + GPUVendorList: []string{"nvidia"}, }, }, expected: []*types.TagInfo{ @@ -2085,7 +2139,7 @@ func TestHandleContainer(t *testing.T) { }, OrchestratorCardTags: []string{}, LowCardTags: []string{ - "kube_gpu_type:nvidia", + "gpu_vendor:nvidia", }, StandardTags: []string{}, }, diff --git a/comp/core/tagger/taggerimpl/server/server.go b/comp/core/tagger/taggerimpl/server/server.go index 0542973b1074c..225e558e7070f 100644 --- a/comp/core/tagger/taggerimpl/server/server.go +++ b/comp/core/tagger/taggerimpl/server/server.go @@ -30,12 +30,14 @@ const ( // Server is a grpc server that streams tagger entities type Server struct { taggerComponent tagger.Component + maxEventSize int } // NewServer returns a new Server -func NewServer(t tagger.Component) *Server { +func NewServer(t tagger.Component, maxEventSize int) *Server { return &Server{ taggerComponent: t, + maxEventSize: maxEventSize, } } @@ -86,16 +88,20 @@ func (s *Server) TaggerStreamEntities(in *pb.StreamTagsRequest, out pb.AgentSecu responseEvents = append(responseEvents, e) } - err = grpc.DoWithTimeout(func() error { - return out.Send(&pb.StreamTagsResponse{ - Events: responseEvents, - }) - }, taggerStreamSendTimeout) + // Split events into chunks and send each one + chunks := splitEvents(responseEvents, s.maxEventSize) + for _, chunk := range chunks { + err = grpc.DoWithTimeout(func() error { + return out.Send(&pb.StreamTagsResponse{ + Events: chunk, + }) + }, taggerStreamSendTimeout) - if err != nil { - log.Warnf("error sending tagger event: %s", err) - 
s.taggerComponent.GetTaggerTelemetryStore().ServerStreamErrors.Inc() - return err + if err != nil { + log.Warnf("error sending tagger event: %s", err) + s.taggerComponent.GetTaggerTelemetryStore().ServerStreamErrors.Inc() + return err + } } case <-out.Context().Done(): diff --git a/comp/core/tagger/taggerimpl/server/util.go b/comp/core/tagger/taggerimpl/server/util.go new file mode 100644 index 0000000000000..28fc2c54a1f3d --- /dev/null +++ b/comp/core/tagger/taggerimpl/server/util.go @@ -0,0 +1,50 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package server + +import ( + "google.golang.org/protobuf/proto" + + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" +) + +// splitBySize splits the given slice into contiguous non-overlapping subslices such that +// the size of each sub-slice is at most maxChunkSize. +// The size of each item is calculated using computeSize +// +// This function assumes that the size of each single item of the initial slice is not larger than maxChunkSize +func splitBySize[T any](slice []T, maxChunkSize int, computeSize func(T) int) [][]T { + + // TODO: return an iter.Seq[[]T] instead of [][]T once we upgrade to golang v1.23 + // returning iter.Seq[[]T] has better performance in terms of memory consumption + var chunks [][]T + currentChunk := []T{} + currentSize := 0 + + for _, item := range slice { + eventSize := computeSize(item) + if currentSize+eventSize > maxChunkSize { + chunks = append(chunks, currentChunk) + currentChunk = []T{} + currentSize = 0 + } + currentChunk = append(currentChunk, item) + currentSize += eventSize + } + if len(currentChunk) > 0 { + chunks = append(chunks, currentChunk) + } + return chunks +} + +// splitEvents splits the array of events to chunks with at most maxChunkSize each +func splitEvents(events []*pb.StreamTagsEvent, maxChunkSize int) [][]*pb.StreamTagsEvent { + return splitBySize( + events, + maxChunkSize, + func(event *pb.StreamTagsEvent) int { return proto.Size(event) }, + ) +} diff --git a/comp/core/tagger/taggerimpl/server/util_test.go b/comp/core/tagger/taggerimpl/server/util_test.go new file mode 100644 index 0000000000000..76f94c4988630 --- /dev/null +++ b/comp/core/tagger/taggerimpl/server/util_test.go @@ -0,0 +1,105 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
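To make the chunking behavior concrete, here is a self-contained demo of `splitBySize` with string lengths as the size function; the helper body is copied from util.go above, only the demo data is new. Note the greedy, order-preserving split: a chunk is flushed as soon as the next item would overflow the budget.

```go
package main

import "fmt"

// splitBySize, as added in util.go above (copied here for a runnable demo).
func splitBySize[T any](slice []T, maxChunkSize int, computeSize func(T) int) [][]T {
	var chunks [][]T
	currentChunk := []T{}
	currentSize := 0
	for _, item := range slice {
		size := computeSize(item)
		// Flush the current chunk before an item that would overflow it.
		if currentSize+size > maxChunkSize {
			chunks = append(chunks, currentChunk)
			currentChunk = []T{}
			currentSize = 0
		}
		currentChunk = append(currentChunk, item)
		currentSize += size
	}
	if len(currentChunk) > 0 {
		chunks = append(chunks, currentChunk)
	}
	return chunks
}

func main() {
	words := []string{"tagger", "stream", "events", "grpc"}
	fmt.Println(splitBySize(words, 10, func(s string) int { return len(s) }))
	// Output: [[tagger] [stream] [events grpc]]
}
```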
+ +package server + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type mockStreamTagsEvent struct { + id int + size int +} + +func TestSplitEvents(t *testing.T) { + testCases := []struct { + name string + events []mockStreamTagsEvent + maxChunkSize int + expected [][]mockStreamTagsEvent // Expecting indices of events in chunks for easier comparison + }{ + { + name: "Empty input", + events: []mockStreamTagsEvent{}, + maxChunkSize: 100, + expected: nil, // No chunks expected + }, + { + name: "Single event within chunk size", + events: []mockStreamTagsEvent{ + {id: 1, size: 50}, // Mock event with size 50 + }, + maxChunkSize: 100, + expected: [][]mockStreamTagsEvent{ + { + {id: 1, size: 50}, // One chunk with one event + }, + }, + }, + { + name: "Multiple events all fit in one chunk", + events: []mockStreamTagsEvent{ + {id: 1, size: 20}, {id: 2, size: 30}, {id: 3, size: 40}, // Total size = 90 + }, + maxChunkSize: 100, + expected: [][]mockStreamTagsEvent{ + { + {id: 1, size: 20}, {id: 2, size: 30}, {id: 3, size: 40}, // All events fit in one chunk + }, + }, + }, + { + name: "Multiple events require splitting", + events: []mockStreamTagsEvent{ + {id: 1, size: 40}, {id: 2, size: 50}, {id: 3, size: 60}, // Total size = 150 + }, + maxChunkSize: 100, + expected: [][]mockStreamTagsEvent{ + { + {id: 1, size: 40}, + {id: 2, size: 50}, + }, + { + {id: 3, size: 60}, + }, // Last event in second chunk + }, + }, + { + name: "Events fit exactly in chunks", + events: []mockStreamTagsEvent{ + {id: 1, size: 50}, {id: 2, size: 50}, // Total size = 100 + }, + maxChunkSize: 100, + expected: [][]mockStreamTagsEvent{ + {{id: 1, size: 50}, {id: 2, size: 50}}, // Both events fit exactly in one chunk + }, + }, + { + name: "Event size exactly matches or exceeds chunk size", + events: []mockStreamTagsEvent{ + {id: 1, size: 100}, {id: 2, size: 101}, // One exactly fits, one exceeds + }, + maxChunkSize: 100, + expected: [][]mockStreamTagsEvent{ + { + {id: 1, size: 100}, + }, + { + {id: 2, size: 101}, + }, + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + chunks := splitBySize(testCase.events, testCase.maxChunkSize, func(e mockStreamTagsEvent) int { return e.size }) + assert.Equal(t, testCase.expected, chunks) + }) + } +} diff --git a/comp/core/tagger/tags/tags.go b/comp/core/tagger/tags/tags.go index 0889fbece7f41..68418bd7887c3 100644 --- a/comp/core/tagger/tags/tags.go +++ b/comp/core/tagger/tags/tags.go @@ -52,8 +52,6 @@ const ( KubeContainerName = "kube_container_name" // KubeOwnerRefKind is the tag for the Kubernetes owner reference kind KubeOwnerRefKind = "kube_ownerref_kind" - // KubeGPUType is the tag for the Kubernetes Resource GPU type - KubeGPUType = "kube_gpu_type" // KubePod is the tag for the pod name KubePod = "pod_name" @@ -91,6 +89,11 @@ const ( // KubeAppManagedBy is the tag for the "app.kubernetes.io/managed-by" Kubernetes label KubeAppManagedBy = "kube_app_managed_by" + // GPU related tags + + // KubeGPUVendor the tag for the Kubernetes Resource GPU vendor + KubeGPUVendor = "gpu_vendor" + // OpenshiftDeploymentConfig is the tag for the OpenShift deployment config name OpenshiftDeploymentConfig = "oshift_deployment_config" diff --git a/comp/core/workloadmeta/collectors/catalog-core/options.go b/comp/core/workloadmeta/collectors/catalog-core/options.go index 4738f78796a0a..ab0370acfffdb 100644 --- a/comp/core/workloadmeta/collectors/catalog-core/options.go +++ b/comp/core/workloadmeta/collectors/catalog-core/options.go @@ -17,7 
+17,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/docker" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/ecs" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/ecsfargate" - "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/host" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/kubeapiserver" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/kubelet" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/kubemetadata" @@ -39,7 +38,6 @@ func getCollectorOptions() []fx.Option { kubemetadata.GetFxOptions(), podman.GetFxOptions(), remoteprocesscollector.GetFxOptions(), - host.GetFxOptions(), process.GetFxOptions(), } } diff --git a/comp/core/workloadmeta/collectors/catalog-less/options.go b/comp/core/workloadmeta/collectors/catalog-less/options.go index 2f0feda6b9b34..515460e179a1e 100644 --- a/comp/core/workloadmeta/collectors/catalog-less/options.go +++ b/comp/core/workloadmeta/collectors/catalog-less/options.go @@ -17,7 +17,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/docker" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/ecs" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/ecsfargate" - "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/host" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/kubeapiserver" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/kubelet" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/kubemetadata" @@ -41,6 +40,5 @@ func getCollectorOptions() []fx.Option { remoteworkloadmeta.GetFxOptions(), fx.Supply(remoteworkloadmeta.Params{}), processcollector.GetFxOptions(), - host.GetFxOptions(), } } diff --git a/comp/core/workloadmeta/collectors/catalog/all_options.go b/comp/core/workloadmeta/collectors/catalog/all_options.go index 616552b889217..2865a451a9fd1 100644 --- a/comp/core/workloadmeta/collectors/catalog/all_options.go +++ b/comp/core/workloadmeta/collectors/catalog/all_options.go @@ -22,7 +22,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/docker" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/ecs" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/ecsfargate" - "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/host" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/kubeapiserver" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/kubelet" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/kubemetadata" @@ -46,6 +45,5 @@ func getCollectorOptions() []fx.Option { remoteworkloadmeta.GetFxOptions(), remoteWorkloadmetaParams(), processcollector.GetFxOptions(), - host.GetFxOptions(), } } diff --git a/comp/core/workloadmeta/collectors/internal/docker/docker.go b/comp/core/workloadmeta/collectors/internal/docker/docker.go index d65673750002a..0afaaf7558bd8 100644 --- a/comp/core/workloadmeta/collectors/internal/docker/docker.go +++ b/comp/core/workloadmeta/collectors/internal/docker/docker.go @@ -164,7 +164,7 @@ func (c *collector) stream(ctx context.Context) { err = c.dockerUtil.UnsubscribeFromContainerEvents("DockerCollector") if err != nil { - 
log.Warnf("error unsubscribbing from container events: %s", err) + log.Warnf("error unsubscribing from container events: %s", err) } err = health.Deregister() diff --git a/comp/core/workloadmeta/collectors/internal/host/host.go b/comp/core/workloadmeta/collectors/internal/host/host.go deleted file mode 100644 index 99864ce288002..0000000000000 --- a/comp/core/workloadmeta/collectors/internal/host/host.go +++ /dev/null @@ -1,116 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -// Package host implements the host tag Workloadmeta collector. -package host - -import ( - "context" - - "github.com/benbjohnson/clock" - "go.uber.org/fx" - - "github.com/DataDog/datadog-agent/comp/core/config" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - hostMetadataUtils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags" - "github.com/DataDog/datadog-agent/pkg/util/log" -) - -const id = "host" - -type dependencies struct { - fx.In - - Config config.Component -} - -type collector struct { - store workloadmeta.Component - catalog workloadmeta.AgentType - config config.Component - clock clock.Clock - timeoutTimer *clock.Timer -} - -// GetFxOptions returns the FX framework options for the collector -func GetFxOptions() fx.Option { - return fx.Provide(NewCollector) -} - -// NewCollector returns a new host collector provider and an error -func NewCollector(deps dependencies) (workloadmeta.CollectorProvider, error) { - return workloadmeta.CollectorProvider{ - Collector: &collector{ - catalog: workloadmeta.NodeAgent | workloadmeta.ProcessAgent, - config: deps.Config, - clock: clock.New(), - }, - }, nil -} - -func (c *collector) Start(_ context.Context, store workloadmeta.Component) error { - - c.store = store - - duration := c.config.GetDuration("expected_tags_duration") - if duration <= 0 { - return nil - } - - log.Debugf("Adding host tags to metrics for %v", duration) - c.timeoutTimer = c.clock.Timer(duration) - - return nil -} - -func (c *collector) Pull(ctx context.Context) error { - // Feature is disabled or timeout has previously occurred - if c.timeoutTimer == nil { - return nil - } - - // Timeout reached - expire any host tags in the store - if c.resetTimerIfTimedOut() { - c.store.Notify(makeEvent([]string{})) - return nil - } - - tags := hostMetadataUtils.Get(ctx, false, c.config).System - c.store.Notify(makeEvent(tags)) - return nil -} - -func (c *collector) GetID() string { - return id -} - -func (c *collector) GetTargetCatalog() workloadmeta.AgentType { - return c.catalog -} - -func (c *collector) resetTimerIfTimedOut() bool { - select { - case <-c.timeoutTimer.C: - c.timeoutTimer = nil - return true - default: - return false - } -} - -func makeEvent(tags []string) []workloadmeta.CollectorEvent { - return []workloadmeta.CollectorEvent{ - { - Type: workloadmeta.EventTypeSet, - Source: workloadmeta.SourceHost, - Entity: &workloadmeta.HostTags{ - EntityID: workloadmeta.EntityID{ - Kind: workloadmeta.KindHost, - ID: id, - }, - HostTags: tags, - }, - }} -} diff --git a/comp/core/workloadmeta/collectors/internal/host/host_test.go b/comp/core/workloadmeta/collectors/internal/host/host_test.go deleted file mode 100644 index a7b49a782cb74..0000000000000 --- a/comp/core/workloadmeta/collectors/internal/host/host_test.go +++ /dev/null @@ -1,128 +0,0 @@ -// 
Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -// Package host implements the host tag Workloadmeta collector. -package host - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/benbjohnson/clock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/fx" - - "github.com/DataDog/datadog-agent/comp/core" - "github.com/DataDog/datadog-agent/comp/core/config" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" - workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" - "github.com/DataDog/datadog-agent/pkg/util/fxutil" -) - -type testDeps struct { - fx.In - - Config config.Component - Wml workloadmetamock.Mock -} - -func TestHostCollector(t *testing.T) { - expectedTags := []string{"tag1:value1", "tag2", "tag3"} - ctx := context.TODO() - - overrides := map[string]interface{}{ - "tags": expectedTags, - "expected_tags_duration": "10m", - } - - deps := fxutil.Test[testDeps](t, fx.Options( - fx.Replace(config.MockParams{Overrides: overrides}), - core.MockBundle(), - fx.Supply(context.Background()), - workloadmetafxmock.MockModule(workloadmeta.NewParams()), - )) - - mockClock := clock.NewMock() - c := collector{ - config: deps.Config, - clock: mockClock, - } - - err := c.Start(ctx, deps.Wml) - require.NoError(t, err) - - expectedWorkloadmetaEvents := []workloadmeta.Event{ - { - // Event generated by the first Pull() call - Type: workloadmeta.EventTypeSet, - Entity: &workloadmeta.HostTags{ - EntityID: workloadmeta.EntityID{ - Kind: workloadmeta.KindHost, - ID: "host", - }, - HostTags: expectedTags, - }, - }, - { - // Event generated by the second Pull() call after more than - // "config.expected_tags_duration" has passed - Type: workloadmeta.EventTypeSet, - Entity: &workloadmeta.HostTags{ - EntityID: workloadmeta.EntityID{ - Kind: workloadmeta.KindHost, - ID: "host", - }, - HostTags: []string{}, - }, - }, - } - - // Create a subscriber in a different goroutine and check that it receives - // the expected events - var wg sync.WaitGroup - wg.Add(1) - go func() { - assertExpectedEventsAreReceived(t, deps.Wml, 10*time.Second, expectedWorkloadmetaEvents) - wg.Done() - }() - - err = c.Pull(ctx) - require.NoError(t, err) - - mockClock.Add(11 * time.Minute) // Notice that this is more than the expected_tags_duration defined above - mockClock.WaitForAllTimers() - - err = c.Pull(ctx) - require.NoError(t, err) - - wg.Wait() -} - -func assertExpectedEventsAreReceived(t *testing.T, wlmeta workloadmeta.Component, timeout time.Duration, expectedEvents []workloadmeta.Event) { - eventChan := wlmeta.Subscribe( - "host-test", - workloadmeta.NormalPriority, - workloadmeta.NewFilterBuilder().AddKind(workloadmeta.KindHost).Build(), - ) - defer wlmeta.Unsubscribe(eventChan) - - var receivedEvents []workloadmeta.Event - - for len(receivedEvents) < len(expectedEvents) { - select { - case eventBundle := <-eventChan: - eventBundle.Acknowledge() - receivedEvents = append(receivedEvents, eventBundle.Events...) 
- case <-time.After(timeout): - require.Fail(t, "timed out waiting for event") - } - } - - assert.ElementsMatch(t, expectedEvents, receivedEvents) -} diff --git a/comp/core/workloadmeta/collectors/internal/kubelet/kubelet.go b/comp/core/workloadmeta/collectors/internal/kubelet/kubelet.go index 183865742fdd5..c9e4960d80572 100644 --- a/comp/core/workloadmeta/collectors/internal/kubelet/kubelet.go +++ b/comp/core/workloadmeta/collectors/internal/kubelet/kubelet.go @@ -145,6 +145,8 @@ func (c *collector) parsePods(pods []*kubelet.Pod) []workloadmeta.CollectorEvent &podID, ) + GPUVendors := getGPUVendorsFromContainers(initContainerEvents, containerEvents) + podOwners := pod.Owners() owners := make([]workloadmeta.KubernetesPodOwner, 0, len(podOwners)) for _, o := range podOwners { @@ -175,6 +177,7 @@ func (c *collector) parsePods(pods []*kubelet.Pod) []workloadmeta.CollectorEvent IP: pod.Status.PodIP, PriorityClass: pod.Spec.PriorityClassName, QOSClass: pod.Status.QOSClass, + GPUVendorList: GPUVendors, RuntimeClass: RuntimeClassName, SecurityContext: PodSecurityContext, } @@ -313,6 +316,23 @@ func (c *collector) parsePodContainers( return podContainers, events } +func getGPUVendorsFromContainers(initContainerEvents, containerEvents []workloadmeta.CollectorEvent) []string { + gpuUniqueTypes := make(map[string]bool) + for _, event := range append(initContainerEvents, containerEvents...) { + container := event.Entity.(*workloadmeta.Container) + for _, GPUVendor := range container.Resources.GPUVendorList { + gpuUniqueTypes[GPUVendor] = true + } + } + + GPUVendors := make([]string, 0, len(gpuUniqueTypes)) + for GPUVendor := range gpuUniqueTypes { + GPUVendors = append(GPUVendors, GPUVendor) + } + + return GPUVendors +} + func extractPodRuntimeClassName(spec *kubelet.Spec) string { if spec.RuntimeClassName == nil { return "" @@ -398,20 +418,19 @@ func extractEnvFromSpec(envSpec []kubelet.EnvVar) map[string]string { return env } -func extractSimpleGPUName(gpuName kubelet.ResourceName) string { - simpleName := "" - switch gpuName { - case kubelet.ResourceNvidiaGPU: - simpleName = "nvidia" - case kubelet.ResourceAMDGPU: - simpleName = "amd" - case kubelet.ResourceIntelGPUxe, kubelet.ResourceIntelGPUi915: - simpleName = "intel" +func extractGPUVendor(gpuNamePrefix kubelet.ResourceName) string { + gpuVendor := "" + switch gpuNamePrefix { + case kubelet.ResourcePrefixNvidiaMIG, kubelet.ResourceGenericNvidiaGPU: + gpuVendor = "nvidia" + case kubelet.ResourcePrefixAMDGPU: + gpuVendor = "amd" + case kubelet.ResourcePrefixIntelGPU: + gpuVendor = "intel" default: - simpleName = string(gpuName) + gpuVendor = string(gpuNamePrefix) } - - return simpleName + return gpuVendor } func extractResources(spec *kubelet.ContainerSpec) workloadmeta.ContainerResources { @@ -425,13 +444,29 @@ func extractResources(spec *kubelet.ContainerSpec) workloadmeta.ContainerResourc } // extract GPU resource info from the possible GPU sources - for _, gpuResource := range kubelet.GetGPUResourceNames() { - if gpuReq, found := spec.Resources.Requests[gpuResource]; found { - resources.GPURequest = pointer.Ptr(uint64(gpuReq.Value())) - resources.GPUType = extractSimpleGPUName(gpuResource) - break + uniqueGPUVendor := make(map[string]bool) + + resourceKeys := make([]kubelet.ResourceName, 0, len(spec.Resources.Requests)) + for resourceName := range spec.Resources.Requests { + resourceKeys = append(resourceKeys, resourceName) + } + + for _, gpuResourceName := range kubelet.GetGPUResourceNames() { + for _, resourceKey := range resourceKeys { + 
if strings.HasPrefix(string(resourceKey), string(gpuResourceName)) { + if gpuReq, found := spec.Resources.Requests[resourceKey]; found { + resources.GPURequest = pointer.Ptr(uint64(gpuReq.Value())) + uniqueGPUVendor[extractGPUVendor(gpuResourceName)] = true + break + } + } } } + gpuVendorList := make([]string, 0, len(uniqueGPUVendor)) + for GPUVendor := range uniqueGPUVendor { + gpuVendorList = append(gpuVendorList, GPUVendor) + } + resources.GPUVendorList = gpuVendorList return resources } diff --git a/comp/core/workloadmeta/def/types.go b/comp/core/workloadmeta/def/types.go index d266ce6073bdb..9d50f7f88ed78 100644 --- a/comp/core/workloadmeta/def/types.go +++ b/comp/core/workloadmeta/def/types.go @@ -47,7 +47,6 @@ const ( KindECSTask Kind = "ecs_task" KindContainerImageMetadata Kind = "container_image_metadata" KindProcess Kind = "process" - KindHost Kind = "host" ) // Source is the source name of an entity. @@ -422,7 +421,7 @@ func (c ContainerHealthStatus) String(verbose bool) string { type ContainerResources struct { GPURequest *uint64 // Number of GPUs GPULimit *uint64 - GPUType string // The type of GPU requested (eg. nvidia, amd, intel) + GPUVendorList []string // The type of GPU requested (eg. nvidia, amd, intel) CPURequest *float64 // Percentage 0-100*numCPU (aligned with CPU Limit from metrics provider) CPULimit *float64 MemoryRequest *uint64 // Bytes @@ -444,8 +443,8 @@ func (cr ContainerResources) String(bool) string { if cr.MemoryLimit != nil { _, _ = fmt.Fprintln(&sb, "TargetMemoryLimit:", *cr.MemoryLimit) } - if cr.GPUType != "" { - _, _ = fmt.Fprintln(&sb, "GPUType:", cr.GPUType) + if cr.GPUVendorList != nil { + _, _ = fmt.Fprintln(&sb, "GPUVendor:", cr.GPUVendorList) } return sb.String() } @@ -678,6 +677,7 @@ type KubernetesPod struct { IP string PriorityClass string QOSClass string + GPUVendorList []string RuntimeClass string KubeServices []string NamespaceLabels map[string]string @@ -745,6 +745,7 @@ func (p KubernetesPod) String(verbose bool) string { if verbose { _, _ = fmt.Fprintln(&sb, "Priority Class:", p.PriorityClass) _, _ = fmt.Fprintln(&sb, "QOS Class:", p.QOSClass) + _, _ = fmt.Fprintln(&sb, "GPU Vendor:", p.GPUVendorList) _, _ = fmt.Fprintln(&sb, "Runtime Class:", p.RuntimeClass) _, _ = fmt.Fprintln(&sb, "PVCs:", sliceToString(p.PersistentVolumeClaimNames)) _, _ = fmt.Fprintln(&sb, "Kube Services:", sliceToString(p.KubeServices)) diff --git a/comp/core/workloadmeta/impl/dump_test.go b/comp/core/workloadmeta/impl/dump_test.go index 13cdae7309aa6..67f13f174e346 100644 --- a/comp/core/workloadmeta/impl/dump_test.go +++ b/comp/core/workloadmeta/impl/dump_test.go @@ -28,7 +28,7 @@ func TestDump(t *testing.T) { Name: "ctr-image", }, Resources: wmdef.ContainerResources{ - GPUType: "nvidia", + GPUVendorList: []string{"nvidia"}, }, Runtime: wmdef.ContainerRuntimeDocker, RuntimeFlavor: wmdef.ContainerRuntimeFlavorKata, @@ -53,9 +53,6 @@ func TestDump(t *testing.T) { }, PID: 1, CgroupPath: "/default/ctr-id", - Resources: wmdef.ContainerResources{ - GPUType: "nvidia", - }, } s.handleEvents([]wmdef.CollectorEvent{ @@ -89,7 +86,7 @@ Runtime: docker RuntimeFlavor: kata Running: false ----------- Resources ----------- -GPUType: nvidia +GPUVendor: [nvidia] `, }, }, @@ -127,7 +124,7 @@ Created At: 0001-01-01 00:00:00 +0000 UTC Started At: 0001-01-01 00:00:00 +0000 UTC Finished At: 0001-01-01 00:00:00 +0000 UTC ----------- Resources ----------- -GPUType: nvidia +GPUVendor: [nvidia] Hostname: Network IPs: PID: 0 @@ -157,7 +154,6 @@ Created At: 0001-01-01 00:00:00 +0000 UTC 
Started At: 0001-01-01 00:00:00 +0000 UTC Finished At: 0001-01-01 00:00:00 +0000 UTC ----------- Resources ----------- -GPUType: nvidia Hostname: Network IPs: PID: 1 @@ -187,7 +183,7 @@ Created At: 0001-01-01 00:00:00 +0000 UTC Started At: 0001-01-01 00:00:00 +0000 UTC Finished At: 0001-01-01 00:00:00 +0000 UTC ----------- Resources ----------- -GPUType: nvidia +GPUVendor: [nvidia] Hostname: Network IPs: PID: 1 diff --git a/comp/metadata/inventoryagent/README.md b/comp/metadata/inventoryagent/README.md index 8d542dfa36eeb..855907ff4e9d6 100644 --- a/comp/metadata/inventoryagent/README.md +++ b/comp/metadata/inventoryagent/README.md @@ -100,7 +100,6 @@ The payload is a JSON dict with the following fields - `feature_cws_network_enabled` - **bool**: True if Network Monitoring is enabled for Cloud Workload Security (see: `event_monitoring_config.network.enabled` config option). - `feature_cws_remote_config_enabled` - **bool**: True if Remote Config is enabled for Cloud Workload Security (see: `runtime_security_config.remote_configuration.enabled` config option). - `feature_cws_security_profiles_enabled` - **bool**: True if Security Profiles is enabled for Cloud Workload Security (see: `runtime_security_config.activity_dump.enabled` config option). - - `feature_usm_http_by_status_code_enabled` - **bool**: True if HTTP Stats by Status Code is enabled for Universal Service Monitoring (see: `service_monitoring_config.enable_http_stats_by_status_code` config option). - `feature_usm_istio_enabled` - **bool**: True if Istio is enabled for Universal Service Monitoring (see: `service_monitoring_config.tls.istio.enabled` config option). - `feature_windows_crash_detection_enabled` - **bool**: True if Windows Crash Detection is enabled (see: `windows_crash_detection.enabled` config option). 
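The `feature_*` entries in this list are all derived the same way: `fetchSystemProbeMetadata` (see the `inventoryagent.go` hunk below) reads one boolean config option per field and stores it in the payload map, which is why dropping `feature_usm_http_by_status_code_enabled` only requires removing a doc bullet, one `GetBool` call, and the matching test expectations. A minimal sketch of that pattern follows; the `config` type and its contents are illustrative stand-ins, not the component's real types.

```go
package main

import "fmt"

// config is a hypothetical stand-in for the system-probe configuration
// reader; the real component exposes a GetBool(key string) bool accessor.
type config map[string]bool

func (c config) GetBool(key string) bool { return c[key] }

func main() {
	sysProbeConf := config{
		"service_monitoring_config.tls.istio.enabled": true,
	}

	// Same shape as ia.data["feature_..."] = sysProbeConf.GetBool(...):
	// one payload field per config option; unset keys read as false.
	data := map[string]interface{}{}
	data["feature_usm_istio_enabled"] = sysProbeConf.GetBool("service_monitoring_config.tls.istio.enabled")
	data["feature_usm_go_tls_enabled"] = sysProbeConf.GetBool("service_monitoring_config.tls.go.enabled")

	fmt.Println(data) // map[feature_usm_go_tls_enabled:false feature_usm_istio_enabled:true]
}
```

Non-feature fields, such as `full_configuration` below, are assembled separately from the scrubbed runtime configuration rather than from a single boolean.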
- `full_configuration` - **string**: the current Agent configuration scrubbed, including all the defaults, as a YAML diff --git a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go index 095c6e9effce9..1c9435cabc9bd 100644 --- a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go +++ b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go @@ -301,7 +301,6 @@ func (ia *inventoryagent) fetchSystemProbeMetadata() { ia.data["feature_usm_redis_enabled"] = sysProbeConf.GetBool("service_monitoring_config.enable_redis_monitoring") ia.data["feature_usm_http2_enabled"] = sysProbeConf.GetBool("service_monitoring_config.enable_http2_monitoring") ia.data["feature_usm_istio_enabled"] = sysProbeConf.GetBool("service_monitoring_config.tls.istio.enabled") - ia.data["feature_usm_http_by_status_code_enabled"] = sysProbeConf.GetBool("service_monitoring_config.enable_http_stats_by_status_code") ia.data["feature_usm_go_tls_enabled"] = sysProbeConf.GetBool("service_monitoring_config.tls.go.enabled") // Discovery module / system-probe diff --git a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go index 3038e093ff093..e57527e82cbaf 100644 --- a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go +++ b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go @@ -108,42 +108,41 @@ func TestInitDataErrorInstallInfo(t *testing.T) { func TestInitData(t *testing.T) { sysprobeOverrides := map[string]any{ - "dynamic_instrumentation.enabled": true, - "remote_configuration.enabled": true, - "runtime_security_config.enabled": true, - "event_monitoring_config.network.enabled": true, - "runtime_security_config.activity_dump.enabled": true, - "runtime_security_config.remote_configuration.enabled": true, - "network_config.enabled": true, - "service_monitoring_config.enable_http_monitoring": true, - "service_monitoring_config.tls.native.enabled": true, - "service_monitoring_config.enabled": true, - "service_monitoring_config.tls.java.enabled": true, - "service_monitoring_config.enable_http2_monitoring": true, - "service_monitoring_config.enable_kafka_monitoring": true, - "service_monitoring_config.enable_postgres_monitoring": true, - "service_monitoring_config.enable_redis_monitoring": true, - "service_monitoring_config.tls.istio.enabled": true, - "service_monitoring_config.enable_http_stats_by_status_code": true, - "service_monitoring_config.tls.go.enabled": true, - "discovery.enabled": true, - "system_probe_config.enable_tcp_queue_length": true, - "system_probe_config.enable_oom_kill": true, - "windows_crash_detection.enabled": true, - "system_probe_config.enable_co_re": true, - "system_probe_config.enable_runtime_compiler": true, - "system_probe_config.enable_kernel_header_download": true, - "system_probe_config.allow_precompiled_fallback": true, - "system_probe_config.telemetry_enabled": true, - "system_probe_config.max_conns_per_message": 10, - "system_probe_config.disable_ipv6": false, - "network_config.collect_tcp_v4": true, - "network_config.collect_tcp_v6": true, - "network_config.collect_udp_v4": true, - "network_config.collect_udp_v6": true, - "network_config.enable_protocol_classification": true, - "network_config.enable_gateway_lookup": true, - "network_config.enable_root_netns": true, + "dynamic_instrumentation.enabled": true, + "remote_configuration.enabled": true, + 
"runtime_security_config.enabled": true, + "event_monitoring_config.network.enabled": true, + "runtime_security_config.activity_dump.enabled": true, + "runtime_security_config.remote_configuration.enabled": true, + "network_config.enabled": true, + "service_monitoring_config.enable_http_monitoring": true, + "service_monitoring_config.tls.native.enabled": true, + "service_monitoring_config.enabled": true, + "service_monitoring_config.tls.java.enabled": true, + "service_monitoring_config.enable_http2_monitoring": true, + "service_monitoring_config.enable_kafka_monitoring": true, + "service_monitoring_config.enable_postgres_monitoring": true, + "service_monitoring_config.enable_redis_monitoring": true, + "service_monitoring_config.tls.istio.enabled": true, + "service_monitoring_config.tls.go.enabled": true, + "discovery.enabled": true, + "system_probe_config.enable_tcp_queue_length": true, + "system_probe_config.enable_oom_kill": true, + "windows_crash_detection.enabled": true, + "system_probe_config.enable_co_re": true, + "system_probe_config.enable_runtime_compiler": true, + "system_probe_config.enable_kernel_header_download": true, + "system_probe_config.allow_precompiled_fallback": true, + "system_probe_config.telemetry_enabled": true, + "system_probe_config.max_conns_per_message": 10, + "system_probe_config.disable_ipv6": false, + "network_config.collect_tcp_v4": true, + "network_config.collect_tcp_v6": true, + "network_config.collect_udp_v4": true, + "network_config.collect_udp_v6": true, + "network_config.enable_protocol_classification": true, + "network_config.enable_gateway_lookup": true, + "network_config.enable_root_netns": true, } overrides := map[string]any{ @@ -218,7 +217,6 @@ func TestInitData(t *testing.T) { "feature_usm_redis_enabled": true, "feature_usm_http2_enabled": true, "feature_usm_istio_enabled": true, - "feature_usm_http_by_status_code_enabled": true, "feature_usm_go_tls_enabled": true, "feature_discovery_enabled": true, "feature_tcp_queue_length_enabled": true, @@ -491,7 +489,6 @@ func TestFetchSystemProbeAgent(t *testing.T) { assert.False(t, ia.data["feature_usm_redis_enabled"].(bool)) assert.False(t, ia.data["feature_usm_http2_enabled"].(bool)) assert.False(t, ia.data["feature_usm_istio_enabled"].(bool)) - assert.True(t, ia.data["feature_usm_http_by_status_code_enabled"].(bool)) assert.False(t, ia.data["feature_usm_go_tls_enabled"].(bool)) assert.False(t, ia.data["feature_discovery_enabled"].(bool)) assert.False(t, ia.data["feature_tcp_queue_length_enabled"].(bool)) @@ -543,7 +540,6 @@ func TestFetchSystemProbeAgent(t *testing.T) { assert.False(t, ia.data["feature_usm_postgres_enabled"].(bool)) assert.False(t, ia.data["feature_usm_http2_enabled"].(bool)) assert.False(t, ia.data["feature_usm_istio_enabled"].(bool)) - assert.False(t, ia.data["feature_usm_http_by_status_code_enabled"].(bool)) assert.False(t, ia.data["feature_usm_go_tls_enabled"].(bool)) assert.False(t, ia.data["feature_discovery_enabled"].(bool)) assert.False(t, ia.data["feature_tcp_queue_length_enabled"].(bool)) @@ -643,7 +639,6 @@ dynamic_instrumentation: assert.True(t, ia.data["feature_usm_redis_enabled"].(bool)) assert.True(t, ia.data["feature_usm_http2_enabled"].(bool)) assert.True(t, ia.data["feature_usm_istio_enabled"].(bool)) - assert.True(t, ia.data["feature_usm_http_by_status_code_enabled"].(bool)) assert.True(t, ia.data["feature_usm_go_tls_enabled"].(bool)) assert.True(t, ia.data["feature_discovery_enabled"].(bool)) assert.True(t, ia.data["feature_tcp_queue_length_enabled"].(bool)) diff 
--git a/comp/otelcol/otlp/integrationtest/integration_test.go b/comp/otelcol/otlp/integrationtest/integration_test.go index d80e75f2ad6d7..659025608087a 100644 --- a/comp/otelcol/otlp/integrationtest/integration_test.go +++ b/comp/otelcol/otlp/integrationtest/integration_test.go @@ -103,6 +103,7 @@ func runTestOTelAgent(ctx context.Context, params *subcommands.GlobalParams) err pkgconfigenv.DetectFeatures(c) return c, nil }), + fxutil.ProvideOptional[coreconfig.Component](), fx.Provide(func() []string { return append(params.ConfPaths, params.Sets...) }), diff --git a/docs/cloud-workload-security/linux_expressions.md b/docs/cloud-workload-security/linux_expressions.md index 196c1c8ecfa7c..51fa104e4c68a 100644 --- a/docs/cloud-workload-security/linux_expressions.md +++ b/docs/cloud-workload-security/linux_expressions.md @@ -166,12 +166,12 @@ The *file.rights* attribute can now be used in addition to *file.mode*. *file.mo | [`process.ancestors.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`process.ancestors.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`process.ancestors.file.name`](#common-fileevent-name-doc) | File's basename | -| [`process.ancestors.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.ancestors.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.ancestors.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`process.ancestors.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`process.ancestors.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`process.ancestors.file.path`](#common-fileevent-path-doc) | File's path | -| [`process.ancestors.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.ancestors.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.ancestors.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`process.ancestors.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`process.ancestors.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -192,17 +192,18 @@ The *file.rights* attribute can now be used in addition to *file.mode*. 
*file.mo | [`process.ancestors.interpreter.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`process.ancestors.interpreter.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`process.ancestors.interpreter.file.name`](#common-fileevent-name-doc) | File's basename | -| [`process.ancestors.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.ancestors.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.ancestors.interpreter.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`process.ancestors.interpreter.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`process.ancestors.interpreter.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`process.ancestors.interpreter.file.path`](#common-fileevent-path-doc) | File's path | -| [`process.ancestors.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.ancestors.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.ancestors.interpreter.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`process.ancestors.interpreter.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`process.ancestors.interpreter.file.user`](#common-filefields-user-doc) | User of the file's owner | | [`process.ancestors.is_kworker`](#common-pidcontext-is_kworker-doc) | Indicates whether the process is a kworker | | [`process.ancestors.is_thread`](#common-process-is_thread-doc) | Indicates whether the process is considered a thread (that is, a child process that hasn't executed another program) | +| [`process.ancestors.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.ancestors.pid`](#common-pidcontext-pid-doc) | Process ID of the process (also called thread group ID) | | [`process.ancestors.ppid`](#common-process-ppid-doc) | Parent process ID | | [`process.ancestors.tid`](#common-pidcontext-tid-doc) | Thread ID of the thread | @@ -246,12 +247,12 @@ The *file.rights* attribute can now be used in addition to *file.mode*. 
*file.mo | [`process.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`process.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`process.file.name`](#common-fileevent-name-doc) | File's basename | -| [`process.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`process.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`process.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`process.file.path`](#common-fileevent-path-doc) | File's path | -| [`process.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`process.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`process.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -272,12 +273,12 @@ The *file.rights* attribute can now be used in addition to *file.mode*. *file.mo | [`process.interpreter.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`process.interpreter.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`process.interpreter.file.name`](#common-fileevent-name-doc) | File's basename | -| [`process.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.interpreter.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`process.interpreter.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`process.interpreter.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`process.interpreter.file.path`](#common-fileevent-path-doc) | File's path | -| [`process.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.interpreter.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`process.interpreter.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`process.interpreter.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -317,12 +318,12 @@ The *file.rights* attribute can now be used in addition to *file.mode*. 
*file.mo | [`process.parent.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`process.parent.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`process.parent.file.name`](#common-fileevent-name-doc) | File's basename | -| [`process.parent.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.parent.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.parent.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`process.parent.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`process.parent.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`process.parent.file.path`](#common-fileevent-path-doc) | File's path | -| [`process.parent.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.parent.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.parent.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`process.parent.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`process.parent.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -343,12 +344,12 @@ The *file.rights* attribute can now be used in addition to *file.mode*. *file.mo | [`process.parent.interpreter.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`process.parent.interpreter.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`process.parent.interpreter.file.name`](#common-fileevent-name-doc) | File's basename | -| [`process.parent.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.parent.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.parent.interpreter.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`process.parent.interpreter.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`process.parent.interpreter.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`process.parent.interpreter.file.path`](#common-fileevent-path-doc) | File's path | -| [`process.parent.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.parent.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.parent.interpreter.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`process.parent.interpreter.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`process.parent.interpreter.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -428,12 +429,12 @@ A process changed the current directory | [`chdir.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | 
[`chdir.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`chdir.file.name`](#common-fileevent-name-doc) | File's basename | -| [`chdir.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`chdir.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`chdir.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`chdir.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`chdir.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`chdir.file.path`](#common-fileevent-path-doc) | File's path | -| [`chdir.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`chdir.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`chdir.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`chdir.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`chdir.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -459,12 +460,12 @@ A file’s permissions were changed | [`chmod.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`chmod.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`chmod.file.name`](#common-fileevent-name-doc) | File's basename | -| [`chmod.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`chmod.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`chmod.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`chmod.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`chmod.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`chmod.file.path`](#common-fileevent-path-doc) | File's path | -| [`chmod.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`chmod.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`chmod.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`chmod.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`chmod.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -493,12 +494,12 @@ A file’s owner was changed | [`chown.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`chown.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`chown.file.name`](#common-fileevent-name-doc) | File's basename | -| [`chown.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`chown.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`chown.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`chown.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that 
provided this file | | [`chown.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`chown.file.path`](#common-fileevent-path-doc) | File's path | -| [`chown.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`chown.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`chown.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`chown.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`chown.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -518,7 +519,7 @@ A DNS request was sent | [`dns.question.count`](#dns-question-count-doc) | the total count of questions in the DNS request | | [`dns.question.length`](#dns-question-length-doc) | the total DNS request size in bytes | | [`dns.question.name`](#dns-question-name-doc) | the queried domain name | -| [`dns.question.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`dns.question.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`dns.question.type`](#dns-question-type-doc) | a two octet code which specifies the DNS question type | | [`network.destination.ip`](#common-ipportcontext-ip-doc) | IP address | | [`network.destination.port`](#common-ipportcontext-port-doc) | Port number | @@ -569,12 +570,12 @@ A process was executed or forked | [`exec.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`exec.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`exec.file.name`](#common-fileevent-name-doc) | File's basename | -| [`exec.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`exec.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`exec.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`exec.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`exec.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`exec.file.path`](#common-fileevent-path-doc) | File's path | -| [`exec.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`exec.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`exec.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`exec.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`exec.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -595,12 +596,12 @@ A process was executed or forked | [`exec.interpreter.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`exec.interpreter.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`exec.interpreter.file.name`](#common-fileevent-name-doc) | File's basename | -| [`exec.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`exec.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`exec.interpreter.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of 
the package that provided this file | | [`exec.interpreter.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`exec.interpreter.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`exec.interpreter.file.path`](#common-fileevent-path-doc) | File's path | -| [`exec.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`exec.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`exec.interpreter.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`exec.interpreter.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`exec.interpreter.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -659,12 +660,12 @@ A process was terminated | [`exit.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`exit.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`exit.file.name`](#common-fileevent-name-doc) | File's basename | -| [`exit.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`exit.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`exit.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`exit.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`exit.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`exit.file.path`](#common-fileevent-path-doc) | File's path | -| [`exit.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`exit.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`exit.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`exit.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`exit.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -685,12 +686,12 @@ A process was terminated | [`exit.interpreter.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`exit.interpreter.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`exit.interpreter.file.name`](#common-fileevent-name-doc) | File's basename | -| [`exit.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`exit.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`exit.interpreter.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`exit.interpreter.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`exit.interpreter.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`exit.interpreter.file.path`](#common-fileevent-path-doc) | File's path | -| 
[`exit.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`exit.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`exit.interpreter.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`exit.interpreter.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`exit.interpreter.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -747,12 +748,12 @@ Create a new name/alias for a file | [`link.file.destination.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`link.file.destination.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`link.file.destination.name`](#common-fileevent-name-doc) | File's basename | -| [`link.file.destination.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`link.file.destination.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`link.file.destination.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`link.file.destination.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`link.file.destination.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`link.file.destination.path`](#common-fileevent-path-doc) | File's path | -| [`link.file.destination.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`link.file.destination.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`link.file.destination.rights`](#common-filefields-rights-doc) | Rights of the file | | [`link.file.destination.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`link.file.destination.user`](#common-filefields-user-doc) | User of the file's owner | @@ -766,12 +767,12 @@ Create a new name/alias for a file | [`link.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`link.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`link.file.name`](#common-fileevent-name-doc) | File's basename | -| [`link.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`link.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`link.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`link.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`link.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`link.file.path`](#common-fileevent-path-doc) | File's path | -| [`link.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`link.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`link.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`link.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`link.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ 
-799,12 +800,12 @@ A new kernel module was loaded | [`load_module.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`load_module.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`load_module.file.name`](#common-fileevent-name-doc) | File's basename | -| [`load_module.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`load_module.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`load_module.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`load_module.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`load_module.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`load_module.file.path`](#common-fileevent-path-doc) | File's path | -| [`load_module.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`load_module.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`load_module.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`load_module.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`load_module.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -831,12 +832,12 @@ A directory was created | [`mkdir.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`mkdir.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`mkdir.file.name`](#common-fileevent-name-doc) | File's basename | -| [`mkdir.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`mkdir.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`mkdir.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`mkdir.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`mkdir.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`mkdir.file.path`](#common-fileevent-path-doc) | File's path | -| [`mkdir.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`mkdir.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`mkdir.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`mkdir.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`mkdir.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -859,12 +860,12 @@ A mmap command was executed | [`mmap.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`mmap.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`mmap.file.name`](#common-fileevent-name-doc) | File's basename | -| [`mmap.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`mmap.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | 
[`mmap.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`mmap.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`mmap.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`mmap.file.path`](#common-fileevent-path-doc) | File's path | -| [`mmap.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`mmap.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`mmap.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`mmap.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`mmap.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -917,12 +918,12 @@ A file was opened | [`open.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`open.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`open.file.name`](#common-fileevent-name-doc) | File's basename | -| [`open.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`open.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`open.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`open.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`open.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`open.file.path`](#common-fileevent-path-doc) | File's path | -| [`open.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`open.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`open.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`open.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`open.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -990,12 +991,12 @@ A ptrace command was executed | [`ptrace.tracee.ancestors.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`ptrace.tracee.ancestors.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`ptrace.tracee.ancestors.file.name`](#common-fileevent-name-doc) | File's basename | -| [`ptrace.tracee.ancestors.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`ptrace.tracee.ancestors.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`ptrace.tracee.ancestors.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`ptrace.tracee.ancestors.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`ptrace.tracee.ancestors.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`ptrace.tracee.ancestors.file.path`](#common-fileevent-path-doc) | File's 
path | -| [`ptrace.tracee.ancestors.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`ptrace.tracee.ancestors.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`ptrace.tracee.ancestors.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`ptrace.tracee.ancestors.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`ptrace.tracee.ancestors.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1016,17 +1017,18 @@ A ptrace command was executed | [`ptrace.tracee.ancestors.interpreter.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`ptrace.tracee.ancestors.interpreter.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`ptrace.tracee.ancestors.interpreter.file.name`](#common-fileevent-name-doc) | File's basename | -| [`ptrace.tracee.ancestors.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`ptrace.tracee.ancestors.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`ptrace.tracee.ancestors.interpreter.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`ptrace.tracee.ancestors.interpreter.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`ptrace.tracee.ancestors.interpreter.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`ptrace.tracee.ancestors.interpreter.file.path`](#common-fileevent-path-doc) | File's path | -| [`ptrace.tracee.ancestors.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`ptrace.tracee.ancestors.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`ptrace.tracee.ancestors.interpreter.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`ptrace.tracee.ancestors.interpreter.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`ptrace.tracee.ancestors.interpreter.file.user`](#common-filefields-user-doc) | User of the file's owner | | [`ptrace.tracee.ancestors.is_kworker`](#common-pidcontext-is_kworker-doc) | Indicates whether the process is a kworker | | [`ptrace.tracee.ancestors.is_thread`](#common-process-is_thread-doc) | Indicates whether the process is considered a thread (that is, a child process that hasn't executed another program) | +| [`ptrace.tracee.ancestors.length`](#common-string-length-doc) | Length of the corresponding element | | [`ptrace.tracee.ancestors.pid`](#common-pidcontext-pid-doc) | Process ID of the process (also called thread group ID) | | [`ptrace.tracee.ancestors.ppid`](#common-process-ppid-doc) | Parent process ID | | [`ptrace.tracee.ancestors.tid`](#common-pidcontext-tid-doc) | Thread ID of the thread | @@ -1070,12 +1072,12 @@ A ptrace command was executed | [`ptrace.tracee.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`ptrace.tracee.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`ptrace.tracee.file.name`](#common-fileevent-name-doc) | File's basename | -| [`ptrace.tracee.file.name.length`](#common-string-length-doc) | Length of the 
corresponding string | +| [`ptrace.tracee.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`ptrace.tracee.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`ptrace.tracee.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`ptrace.tracee.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`ptrace.tracee.file.path`](#common-fileevent-path-doc) | File's path | -| [`ptrace.tracee.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`ptrace.tracee.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`ptrace.tracee.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`ptrace.tracee.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`ptrace.tracee.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1096,12 +1098,12 @@ A ptrace command was executed | [`ptrace.tracee.interpreter.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`ptrace.tracee.interpreter.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`ptrace.tracee.interpreter.file.name`](#common-fileevent-name-doc) | File's basename | -| [`ptrace.tracee.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`ptrace.tracee.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`ptrace.tracee.interpreter.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`ptrace.tracee.interpreter.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`ptrace.tracee.interpreter.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`ptrace.tracee.interpreter.file.path`](#common-fileevent-path-doc) | File's path | -| [`ptrace.tracee.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`ptrace.tracee.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`ptrace.tracee.interpreter.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`ptrace.tracee.interpreter.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`ptrace.tracee.interpreter.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1141,12 +1143,12 @@ A ptrace command was executed | [`ptrace.tracee.parent.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`ptrace.tracee.parent.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`ptrace.tracee.parent.file.name`](#common-fileevent-name-doc) | File's basename | -| [`ptrace.tracee.parent.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`ptrace.tracee.parent.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | 
[`ptrace.tracee.parent.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`ptrace.tracee.parent.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`ptrace.tracee.parent.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`ptrace.tracee.parent.file.path`](#common-fileevent-path-doc) | File's path | -| [`ptrace.tracee.parent.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`ptrace.tracee.parent.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`ptrace.tracee.parent.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`ptrace.tracee.parent.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`ptrace.tracee.parent.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1167,12 +1169,12 @@ A ptrace command was executed | [`ptrace.tracee.parent.interpreter.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`ptrace.tracee.parent.interpreter.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`ptrace.tracee.parent.interpreter.file.name`](#common-fileevent-name-doc) | File's basename | -| [`ptrace.tracee.parent.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`ptrace.tracee.parent.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`ptrace.tracee.parent.interpreter.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`ptrace.tracee.parent.interpreter.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`ptrace.tracee.parent.interpreter.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`ptrace.tracee.parent.interpreter.file.path`](#common-fileevent-path-doc) | File's path | -| [`ptrace.tracee.parent.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`ptrace.tracee.parent.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`ptrace.tracee.parent.interpreter.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`ptrace.tracee.parent.interpreter.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`ptrace.tracee.parent.interpreter.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1216,12 +1218,12 @@ Remove extended attributes | [`removexattr.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`removexattr.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`removexattr.file.name`](#common-fileevent-name-doc) | File's basename | -| [`removexattr.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`removexattr.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`removexattr.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] 
Name of the package that provided this file | | [`removexattr.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`removexattr.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`removexattr.file.path`](#common-fileevent-path-doc) | File's path | -| [`removexattr.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`removexattr.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`removexattr.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`removexattr.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`removexattr.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1245,12 +1247,12 @@ A file/directory was renamed | [`rename.file.destination.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`rename.file.destination.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`rename.file.destination.name`](#common-fileevent-name-doc) | File's basename | -| [`rename.file.destination.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`rename.file.destination.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`rename.file.destination.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`rename.file.destination.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`rename.file.destination.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`rename.file.destination.path`](#common-fileevent-path-doc) | File's path | -| [`rename.file.destination.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`rename.file.destination.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`rename.file.destination.rights`](#common-filefields-rights-doc) | Rights of the file | | [`rename.file.destination.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`rename.file.destination.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1264,12 +1266,12 @@ A file/directory was renamed | [`rename.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`rename.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`rename.file.name`](#common-fileevent-name-doc) | File's basename | -| [`rename.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`rename.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`rename.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`rename.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`rename.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | 
[`rename.file.path`](#common-fileevent-path-doc) | File's path | -| [`rename.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`rename.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`rename.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`rename.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`rename.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1294,12 +1296,12 @@ A directory was removed | [`rmdir.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`rmdir.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`rmdir.file.name`](#common-fileevent-name-doc) | File's basename | -| [`rmdir.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`rmdir.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`rmdir.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`rmdir.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`rmdir.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`rmdir.file.path`](#common-fileevent-path-doc) | File's path | -| [`rmdir.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`rmdir.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`rmdir.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`rmdir.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`rmdir.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1361,12 +1363,12 @@ Set extended attributes | [`setxattr.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`setxattr.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`setxattr.file.name`](#common-fileevent-name-doc) | File's basename | -| [`setxattr.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`setxattr.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`setxattr.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`setxattr.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`setxattr.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`setxattr.file.path`](#common-fileevent-path-doc) | File's path | -| [`setxattr.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`setxattr.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`setxattr.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`setxattr.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`setxattr.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1414,12 +1416,12 @@ A signal was sent | 
[`signal.target.ancestors.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`signal.target.ancestors.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`signal.target.ancestors.file.name`](#common-fileevent-name-doc) | File's basename | -| [`signal.target.ancestors.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`signal.target.ancestors.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`signal.target.ancestors.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`signal.target.ancestors.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`signal.target.ancestors.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`signal.target.ancestors.file.path`](#common-fileevent-path-doc) | File's path | -| [`signal.target.ancestors.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`signal.target.ancestors.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`signal.target.ancestors.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`signal.target.ancestors.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`signal.target.ancestors.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1440,17 +1442,18 @@ A signal was sent | [`signal.target.ancestors.interpreter.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`signal.target.ancestors.interpreter.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`signal.target.ancestors.interpreter.file.name`](#common-fileevent-name-doc) | File's basename | -| [`signal.target.ancestors.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`signal.target.ancestors.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`signal.target.ancestors.interpreter.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`signal.target.ancestors.interpreter.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`signal.target.ancestors.interpreter.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`signal.target.ancestors.interpreter.file.path`](#common-fileevent-path-doc) | File's path | -| [`signal.target.ancestors.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`signal.target.ancestors.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`signal.target.ancestors.interpreter.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`signal.target.ancestors.interpreter.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`signal.target.ancestors.interpreter.file.user`](#common-filefields-user-doc) | User of the file's owner | | 
[`signal.target.ancestors.is_kworker`](#common-pidcontext-is_kworker-doc) | Indicates whether the process is a kworker | | [`signal.target.ancestors.is_thread`](#common-process-is_thread-doc) | Indicates whether the process is considered a thread (that is, a child process that hasn't executed another program) | +| [`signal.target.ancestors.length`](#common-string-length-doc) | Length of the corresponding element | | [`signal.target.ancestors.pid`](#common-pidcontext-pid-doc) | Process ID of the process (also called thread group ID) | | [`signal.target.ancestors.ppid`](#common-process-ppid-doc) | Parent process ID | | [`signal.target.ancestors.tid`](#common-pidcontext-tid-doc) | Thread ID of the thread | @@ -1494,12 +1497,12 @@ A signal was sent | [`signal.target.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`signal.target.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`signal.target.file.name`](#common-fileevent-name-doc) | File's basename | -| [`signal.target.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`signal.target.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`signal.target.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`signal.target.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`signal.target.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`signal.target.file.path`](#common-fileevent-path-doc) | File's path | -| [`signal.target.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`signal.target.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`signal.target.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`signal.target.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`signal.target.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1520,12 +1523,12 @@ A signal was sent | [`signal.target.interpreter.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`signal.target.interpreter.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`signal.target.interpreter.file.name`](#common-fileevent-name-doc) | File's basename | -| [`signal.target.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`signal.target.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`signal.target.interpreter.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`signal.target.interpreter.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`signal.target.interpreter.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`signal.target.interpreter.file.path`](#common-fileevent-path-doc) | File's path | -| [`signal.target.interpreter.file.path.length`](#common-string-length-doc) 
| Length of the corresponding string | +| [`signal.target.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`signal.target.interpreter.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`signal.target.interpreter.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`signal.target.interpreter.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1565,12 +1568,12 @@ A signal was sent | [`signal.target.parent.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`signal.target.parent.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`signal.target.parent.file.name`](#common-fileevent-name-doc) | File's basename | -| [`signal.target.parent.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`signal.target.parent.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`signal.target.parent.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`signal.target.parent.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`signal.target.parent.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`signal.target.parent.file.path`](#common-fileevent-path-doc) | File's path | -| [`signal.target.parent.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`signal.target.parent.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`signal.target.parent.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`signal.target.parent.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`signal.target.parent.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1591,12 +1594,12 @@ A signal was sent | [`signal.target.parent.interpreter.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`signal.target.parent.interpreter.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`signal.target.parent.interpreter.file.name`](#common-fileevent-name-doc) | File's basename | -| [`signal.target.parent.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`signal.target.parent.interpreter.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`signal.target.parent.interpreter.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`signal.target.parent.interpreter.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`signal.target.parent.interpreter.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`signal.target.parent.interpreter.file.path`](#common-fileevent-path-doc) | File's path | -| [`signal.target.parent.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| 
[`signal.target.parent.interpreter.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`signal.target.parent.interpreter.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`signal.target.parent.interpreter.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`signal.target.parent.interpreter.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1639,12 +1642,12 @@ A splice command was executed | [`splice.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`splice.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`splice.file.name`](#common-fileevent-name-doc) | File's basename | -| [`splice.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`splice.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`splice.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`splice.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`splice.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`splice.file.path`](#common-fileevent-path-doc) | File's path | -| [`splice.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`splice.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`splice.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`splice.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`splice.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1669,12 +1672,12 @@ A file was deleted | [`unlink.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the file | | [`unlink.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`unlink.file.name`](#common-fileevent-name-doc) | File's basename | -| [`unlink.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`unlink.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`unlink.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`unlink.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`unlink.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`unlink.file.path`](#common-fileevent-path-doc) | File's path | -| [`unlink.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`unlink.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`unlink.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`unlink.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`unlink.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -1710,12 +1713,12 @@ Change file access/modification times | [`utimes.file.modification_time`](#common-filefields-modification_time-doc) | Modification time (mtime) of the 
file | | [`utimes.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`utimes.file.name`](#common-fileevent-name-doc) | File's basename | -| [`utimes.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`utimes.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`utimes.file.package.name`](#common-fileevent-package-name-doc) | [Experimental] Name of the package that provided this file | | [`utimes.file.package.source_version`](#common-fileevent-package-source_version-doc) | [Experimental] Full version of the source package of the package that provided this file | | [`utimes.file.package.version`](#common-fileevent-package-version-doc) | [Experimental] Full version of the package that provided this file | | [`utimes.file.path`](#common-fileevent-path-doc) | File's path | -| [`utimes.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`utimes.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`utimes.file.rights`](#common-filefields-rights-doc) | Rights of the file | | [`utimes.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`utimes.file.user`](#common-filefields-user-doc) | User of the file's owner | @@ -2181,10 +2184,10 @@ Constants: [L4 protocols](#l4-protocols) ### `*.length` {#common-string-length-doc} Type: int -Definition: Length of the corresponding string +Definition: Length of the corresponding element -`*.length` has 79 possible prefixes: -`chdir.file.name` `chdir.file.path` `chmod.file.name` `chmod.file.path` `chown.file.name` `chown.file.path` `dns.question.name` `exec.file.name` `exec.file.path` `exec.interpreter.file.name` `exec.interpreter.file.path` `exit.file.name` `exit.file.path` `exit.interpreter.file.name` `exit.interpreter.file.path` `link.file.destination.name` `link.file.destination.path` `link.file.name` `link.file.path` `load_module.file.name` `load_module.file.path` `mkdir.file.name` `mkdir.file.path` `mmap.file.name` `mmap.file.path` `open.file.name` `open.file.path` `process.ancestors.file.name` `process.ancestors.file.path` `process.ancestors.interpreter.file.name` `process.ancestors.interpreter.file.path` `process.file.name` `process.file.path` `process.interpreter.file.name` `process.interpreter.file.path` `process.parent.file.name` `process.parent.file.path` `process.parent.interpreter.file.name` `process.parent.interpreter.file.path` `ptrace.tracee.ancestors.file.name` `ptrace.tracee.ancestors.file.path` `ptrace.tracee.ancestors.interpreter.file.name` `ptrace.tracee.ancestors.interpreter.file.path` `ptrace.tracee.file.name` `ptrace.tracee.file.path` `ptrace.tracee.interpreter.file.name` `ptrace.tracee.interpreter.file.path` `ptrace.tracee.parent.file.name` `ptrace.tracee.parent.file.path` `ptrace.tracee.parent.interpreter.file.name` `ptrace.tracee.parent.interpreter.file.path` `removexattr.file.name` `removexattr.file.path` `rename.file.destination.name` `rename.file.destination.path` `rename.file.name` `rename.file.path` `rmdir.file.name` `rmdir.file.path` `setxattr.file.name` `setxattr.file.path` `signal.target.ancestors.file.name` `signal.target.ancestors.file.path` `signal.target.ancestors.interpreter.file.name` `signal.target.ancestors.interpreter.file.path` `signal.target.file.name` `signal.target.file.path` `signal.target.interpreter.file.name` `signal.target.interpreter.file.path` `signal.target.parent.file.name` `signal.target.parent.file.path` 
`signal.target.parent.interpreter.file.name` `signal.target.parent.interpreter.file.path` `splice.file.name` `splice.file.path` `unlink.file.name` `unlink.file.path` `utimes.file.name` `utimes.file.path` +`*.length` has 82 possible prefixes: +`chdir.file.name` `chdir.file.path` `chmod.file.name` `chmod.file.path` `chown.file.name` `chown.file.path` `dns.question.name` `exec.file.name` `exec.file.path` `exec.interpreter.file.name` `exec.interpreter.file.path` `exit.file.name` `exit.file.path` `exit.interpreter.file.name` `exit.interpreter.file.path` `link.file.destination.name` `link.file.destination.path` `link.file.name` `link.file.path` `load_module.file.name` `load_module.file.path` `mkdir.file.name` `mkdir.file.path` `mmap.file.name` `mmap.file.path` `open.file.name` `open.file.path` `process.ancestors` `process.ancestors.file.name` `process.ancestors.file.path` `process.ancestors.interpreter.file.name` `process.ancestors.interpreter.file.path` `process.file.name` `process.file.path` `process.interpreter.file.name` `process.interpreter.file.path` `process.parent.file.name` `process.parent.file.path` `process.parent.interpreter.file.name` `process.parent.interpreter.file.path` `ptrace.tracee.ancestors` `ptrace.tracee.ancestors.file.name` `ptrace.tracee.ancestors.file.path` `ptrace.tracee.ancestors.interpreter.file.name` `ptrace.tracee.ancestors.interpreter.file.path` `ptrace.tracee.file.name` `ptrace.tracee.file.path` `ptrace.tracee.interpreter.file.name` `ptrace.tracee.interpreter.file.path` `ptrace.tracee.parent.file.name` `ptrace.tracee.parent.file.path` `ptrace.tracee.parent.interpreter.file.name` `ptrace.tracee.parent.interpreter.file.path` `removexattr.file.name` `removexattr.file.path` `rename.file.destination.name` `rename.file.destination.path` `rename.file.name` `rename.file.path` `rmdir.file.name` `rmdir.file.path` `setxattr.file.name` `setxattr.file.path` `signal.target.ancestors` `signal.target.ancestors.file.name` `signal.target.ancestors.file.path` `signal.target.ancestors.interpreter.file.name` `signal.target.ancestors.interpreter.file.path` `signal.target.file.name` `signal.target.file.path` `signal.target.interpreter.file.name` `signal.target.interpreter.file.path` `signal.target.parent.file.name` `signal.target.parent.file.path` `signal.target.parent.interpreter.file.name` `signal.target.parent.interpreter.file.path` `splice.file.name` `splice.file.path` `unlink.file.name` `unlink.file.path` `utimes.file.name` `utimes.file.path` ### `*.manager` {#common-cgroupcontext-manager-doc} diff --git a/docs/cloud-workload-security/secl_linux.json b/docs/cloud-workload-security/secl_linux.json index 8ff7c26142565..8e98f4ff09763 100644 --- a/docs/cloud-workload-security/secl_linux.json +++ b/docs/cloud-workload-security/secl_linux.json @@ -249,7 +249,7 @@ }, { "name": "process.ancestors.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -274,7 +274,7 @@ }, { "name": "process.ancestors.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -379,7 +379,7 @@ }, { "name": "process.ancestors.interpreter.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -404,7 +404,7 @@ }, { "name": 
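For readers skimming this changeset, a minimal SECL policy sketch may help illustrate why the shared `*.length` definition now says "element" rather than "string": on a string-valued prefix such as `open.file.path`, `.length` is the length of the string, while on an iterator prefix such as the newly listed `process.ancestors`, it is the number of entries. The rule `id`, `description`, and thresholds below are illustrative only, not part of this changeset:

```yaml
# Hypothetical CWS policy sketch combining both uses of `.length`:
# a string length (`open.file.path.length`) and an iterator length
# (`process.ancestors.length`, the number of ancestor processes).
rules:
  - id: illustrative_length_usage
    description: Open of an unusually long path by a deeply nested process
    expression: open.file.path.length > 255 && process.ancestors.length > 10
```

The same reading carries over to the other iterator prefixes added here: `ptrace.tracee.ancestors` and `signal.target.ancestors` on Linux, and `process.ancestors` on Windows.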
"process.ancestors.interpreter.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -432,6 +432,11 @@ "definition": "Indicates whether the process is considered a thread (that is, a child process that hasn't executed another program)", "property_doc_link": "common-process-is_thread-doc" }, + { + "name": "process.ancestors.length", + "definition": "Length of the corresponding element", + "property_doc_link": "common-string-length-doc" + }, { "name": "process.ancestors.pid", "definition": "Process ID of the process (also called thread group ID)", @@ -649,7 +654,7 @@ }, { "name": "process.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -674,7 +679,7 @@ }, { "name": "process.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -779,7 +784,7 @@ }, { "name": "process.interpreter.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -804,7 +809,7 @@ }, { "name": "process.interpreter.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -1004,7 +1009,7 @@ }, { "name": "process.parent.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -1029,7 +1034,7 @@ }, { "name": "process.parent.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -1134,7 +1139,7 @@ }, { "name": "process.parent.interpreter.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -1159,7 +1164,7 @@ }, { "name": "process.parent.interpreter.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -1445,7 +1450,7 @@ }, { "name": "chdir.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -1470,7 +1475,7 @@ }, { "name": "chdir.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -1574,7 +1579,7 @@ }, { "name": "chmod.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -1599,7 +1604,7 @@ }, { "name": "chmod.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -1718,7 +1723,7 @@ }, { "name": "chown.file.name.length", - "definition": "Length of the corresponding 
string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -1743,7 +1748,7 @@ }, { "name": "chown.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -1817,7 +1822,7 @@ }, { "name": "dns.question.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -2046,7 +2051,7 @@ }, { "name": "exec.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -2071,7 +2076,7 @@ }, { "name": "exec.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -2176,7 +2181,7 @@ }, { "name": "exec.interpreter.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -2201,7 +2206,7 @@ }, { "name": "exec.interpreter.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -2470,7 +2475,7 @@ }, { "name": "exit.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -2495,7 +2500,7 @@ }, { "name": "exit.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -2600,7 +2605,7 @@ }, { "name": "exit.interpreter.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -2625,7 +2630,7 @@ }, { "name": "exit.interpreter.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -2858,7 +2863,7 @@ }, { "name": "link.file.destination.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -2883,7 +2888,7 @@ }, { "name": "link.file.destination.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -2953,7 +2958,7 @@ }, { "name": "link.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -2978,7 +2983,7 @@ }, { "name": "link.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -3092,7 +3097,7 @@ }, { "name": "load_module.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -3117,7 +3122,7 @@ }, 
{ "name": "load_module.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -3226,7 +3231,7 @@ }, { "name": "mkdir.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -3251,7 +3256,7 @@ }, { "name": "mkdir.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -3340,7 +3345,7 @@ }, { "name": "mmap.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -3365,7 +3370,7 @@ }, { "name": "mmap.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -3542,7 +3547,7 @@ }, { "name": "open.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -3567,7 +3572,7 @@ }, { "name": "open.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -3855,7 +3860,7 @@ }, { "name": "ptrace.tracee.ancestors.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -3880,7 +3885,7 @@ }, { "name": "ptrace.tracee.ancestors.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -3985,7 +3990,7 @@ }, { "name": "ptrace.tracee.ancestors.interpreter.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -4010,7 +4015,7 @@ }, { "name": "ptrace.tracee.ancestors.interpreter.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -4038,6 +4043,11 @@ "definition": "Indicates whether the process is considered a thread (that is, a child process that hasn't executed another program)", "property_doc_link": "common-process-is_thread-doc" }, + { + "name": "ptrace.tracee.ancestors.length", + "definition": "Length of the corresponding element", + "property_doc_link": "common-string-length-doc" + }, { "name": "ptrace.tracee.ancestors.pid", "definition": "Process ID of the process (also called thread group ID)", @@ -4255,7 +4265,7 @@ }, { "name": "ptrace.tracee.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -4280,7 +4290,7 @@ }, { "name": "ptrace.tracee.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -4385,7 +4395,7 @@ }, { "name": "ptrace.tracee.interpreter.file.name.length", - 
"definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -4410,7 +4420,7 @@ }, { "name": "ptrace.tracee.interpreter.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -4610,7 +4620,7 @@ }, { "name": "ptrace.tracee.parent.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -4635,7 +4645,7 @@ }, { "name": "ptrace.tracee.parent.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -4740,7 +4750,7 @@ }, { "name": "ptrace.tracee.parent.interpreter.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -4765,7 +4775,7 @@ }, { "name": "ptrace.tracee.parent.interpreter.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -4959,7 +4969,7 @@ }, { "name": "removexattr.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -4984,7 +4994,7 @@ }, { "name": "removexattr.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -5078,7 +5088,7 @@ }, { "name": "rename.file.destination.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -5103,7 +5113,7 @@ }, { "name": "rename.file.destination.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -5173,7 +5183,7 @@ }, { "name": "rename.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -5198,7 +5208,7 @@ }, { "name": "rename.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -5297,7 +5307,7 @@ }, { "name": "rmdir.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -5322,7 +5332,7 @@ }, { "name": "rmdir.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -5528,7 +5538,7 @@ }, { "name": "setxattr.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -5553,7 +5563,7 @@ }, { "name": "setxattr.file.path.length", - "definition": "Length of the corresponding string", + 
"definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -5767,7 +5777,7 @@ }, { "name": "signal.target.ancestors.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -5792,7 +5802,7 @@ }, { "name": "signal.target.ancestors.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -5897,7 +5907,7 @@ }, { "name": "signal.target.ancestors.interpreter.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -5922,7 +5932,7 @@ }, { "name": "signal.target.ancestors.interpreter.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -5950,6 +5960,11 @@ "definition": "Indicates whether the process is considered a thread (that is, a child process that hasn't executed another program)", "property_doc_link": "common-process-is_thread-doc" }, + { + "name": "signal.target.ancestors.length", + "definition": "Length of the corresponding element", + "property_doc_link": "common-string-length-doc" + }, { "name": "signal.target.ancestors.pid", "definition": "Process ID of the process (also called thread group ID)", @@ -6167,7 +6182,7 @@ }, { "name": "signal.target.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -6192,7 +6207,7 @@ }, { "name": "signal.target.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -6297,7 +6312,7 @@ }, { "name": "signal.target.interpreter.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -6322,7 +6337,7 @@ }, { "name": "signal.target.interpreter.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -6522,7 +6537,7 @@ }, { "name": "signal.target.parent.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -6547,7 +6562,7 @@ }, { "name": "signal.target.parent.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -6652,7 +6667,7 @@ }, { "name": "signal.target.parent.interpreter.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -6677,7 +6692,7 @@ }, { "name": "signal.target.parent.interpreter.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -6866,7 +6881,7 @@ }, { "name": 
"splice.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -6891,7 +6906,7 @@ }, { "name": "splice.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -6990,7 +7005,7 @@ }, { "name": "unlink.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -7015,7 +7030,7 @@ }, { "name": "unlink.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -7143,7 +7158,7 @@ }, { "name": "utimes.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -7168,7 +7183,7 @@ }, { "name": "utimes.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -8354,7 +8369,7 @@ "name": "*.length", "link": "common-string-length-doc", "type": "int", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "prefixes": [ "chdir.file.name", "chdir.file.path", @@ -8383,6 +8398,7 @@ "mmap.file.path", "open.file.name", "open.file.path", + "process.ancestors", "process.ancestors.file.name", "process.ancestors.file.path", "process.ancestors.interpreter.file.name", @@ -8395,6 +8411,7 @@ "process.parent.file.path", "process.parent.interpreter.file.name", "process.parent.interpreter.file.path", + "ptrace.tracee.ancestors", "ptrace.tracee.ancestors.file.name", "ptrace.tracee.ancestors.file.path", "ptrace.tracee.ancestors.interpreter.file.name", @@ -8417,6 +8434,7 @@ "rmdir.file.path", "setxattr.file.name", "setxattr.file.path", + "signal.target.ancestors", "signal.target.ancestors.file.name", "signal.target.ancestors.file.path", "signal.target.ancestors.interpreter.file.name", diff --git a/docs/cloud-workload-security/secl_windows.json b/docs/cloud-workload-security/secl_windows.json index bdcf0069bfc56..e8e3f1e601c4e 100644 --- a/docs/cloud-workload-security/secl_windows.json +++ b/docs/cloud-workload-security/secl_windows.json @@ -84,7 +84,7 @@ }, { "name": "process.ancestors.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -94,7 +94,12 @@ }, { "name": "process.ancestors.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", + "property_doc_link": "common-string-length-doc" + }, + { + "name": "process.ancestors.length", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -149,7 +154,7 @@ }, { "name": "process.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -159,7 +164,7 @@ }, { "name": "process.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", 
"property_doc_link": "common-string-length-doc" }, { @@ -194,7 +199,7 @@ }, { "name": "process.parent.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -204,7 +209,7 @@ }, { "name": "process.parent.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -302,7 +307,7 @@ }, { "name": "create.file.device_path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -312,7 +317,7 @@ }, { "name": "create.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -322,7 +327,7 @@ }, { "name": "create.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" } ] @@ -341,7 +346,7 @@ }, { "name": "create.registry.key_name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -351,7 +356,7 @@ }, { "name": "create.registry.key_path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -361,7 +366,7 @@ }, { "name": "create_key.registry.key_name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -371,7 +376,7 @@ }, { "name": "create_key.registry.key_path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" } ] @@ -390,7 +395,7 @@ }, { "name": "delete.file.device_path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -400,7 +405,7 @@ }, { "name": "delete.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -410,7 +415,7 @@ }, { "name": "delete.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" } ] @@ -429,7 +434,7 @@ }, { "name": "delete.registry.key_name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -439,7 +444,7 @@ }, { "name": "delete.registry.key_path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -449,7 +454,7 @@ }, { "name": "delete_key.registry.key_name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -459,7 +464,7 @@ }, { "name": "delete_key.registry.key_path.length", - 
"definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" } ] @@ -503,7 +508,7 @@ }, { "name": "exec.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -513,7 +518,7 @@ }, { "name": "exec.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -587,7 +592,7 @@ }, { "name": "exit.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -597,7 +602,7 @@ }, { "name": "exit.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -636,7 +641,7 @@ }, { "name": "open.registry.key_name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -646,7 +651,7 @@ }, { "name": "open.registry.key_path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -656,7 +661,7 @@ }, { "name": "open_key.registry.key_name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -666,7 +671,7 @@ }, { "name": "open_key.registry.key_path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" } ] @@ -685,7 +690,7 @@ }, { "name": "rename.file.destination.device_path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -695,7 +700,7 @@ }, { "name": "rename.file.destination.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -705,7 +710,7 @@ }, { "name": "rename.file.destination.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -715,7 +720,7 @@ }, { "name": "rename.file.device_path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -725,7 +730,7 @@ }, { "name": "rename.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -735,7 +740,7 @@ }, { "name": "rename.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" } ] @@ -754,7 +759,7 @@ }, { "name": "set.registry.key_name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": 
"common-string-length-doc" }, { @@ -764,7 +769,7 @@ }, { "name": "set.registry.key_path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -774,7 +779,7 @@ }, { "name": "set.registry.value_name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -789,7 +794,7 @@ }, { "name": "set_key_value.registry.key_name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -799,7 +804,7 @@ }, { "name": "set_key_value.registry.key_path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -809,7 +814,7 @@ }, { "name": "set_key_value.registry.value_name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -833,7 +838,7 @@ }, { "name": "write.file.device_path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -843,7 +848,7 @@ }, { "name": "write.file.name.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" }, { @@ -853,7 +858,7 @@ }, { "name": "write.file.path.length", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "property_doc_link": "common-string-length-doc" } ] @@ -1012,7 +1017,7 @@ "name": "*.length", "link": "common-string-length-doc", "type": "int", - "definition": "Length of the corresponding string", + "definition": "Length of the corresponding element", "prefixes": [ "create.file.device_path", "create.file.name", @@ -1036,6 +1041,7 @@ "open.registry.key_path", "open_key.registry.key_name", "open_key.registry.key_path", + "process.ancestors", "process.ancestors.file.name", "process.ancestors.file.path", "process.file.name", diff --git a/docs/cloud-workload-security/windows_expressions.md b/docs/cloud-workload-security/windows_expressions.md index c749c152c24cd..035c98e380bd8 100644 --- a/docs/cloud-workload-security/windows_expressions.md +++ b/docs/cloud-workload-security/windows_expressions.md @@ -79,9 +79,10 @@ List of the available variables: | [`process.ancestors.envp`](#common-process-envp-doc) | Environment variables of the process | | [`process.ancestors.envs`](#common-process-envs-doc) | Environment variable names of the process | | [`process.ancestors.file.name`](#common-fileevent-name-doc) | File's basename | -| [`process.ancestors.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.ancestors.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.ancestors.file.path`](#common-fileevent-path-doc) | File's path | -| [`process.ancestors.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.ancestors.file.path.length`](#common-string-length-doc) | Length of the corresponding element | +| [`process.ancestors.length`](#common-string-length-doc) | Length of the 
corresponding element | | [`process.ancestors.pid`](#common-pidcontext-pid-doc) | Process ID of the process (also called thread group ID) | | [`process.ancestors.ppid`](#common-process-ppid-doc) | Parent process ID | | [`process.ancestors.user`](#common-process-user-doc) | User name | @@ -92,18 +93,18 @@ List of the available variables: | [`process.envp`](#common-process-envp-doc) | Environment variables of the process | | [`process.envs`](#common-process-envs-doc) | Environment variable names of the process | | [`process.file.name`](#common-fileevent-name-doc) | File's basename | -| [`process.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.file.path`](#common-fileevent-path-doc) | File's path | -| [`process.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.parent.cmdline`](#common-process-cmdline-doc) | Command line of the process | | [`process.parent.container.id`](#common-process-container-id-doc) | Container ID | | [`process.parent.created_at`](#common-process-created_at-doc) | Timestamp of the creation of the process | | [`process.parent.envp`](#common-process-envp-doc) | Environment variables of the process | | [`process.parent.envs`](#common-process-envs-doc) | Environment variable names of the process | | [`process.parent.file.name`](#common-fileevent-name-doc) | File's basename | -| [`process.parent.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.parent.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.parent.file.path`](#common-fileevent-path-doc) | File's path | -| [`process.parent.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`process.parent.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`process.parent.pid`](#common-pidcontext-pid-doc) | Process ID of the process (also called thread group ID) | | [`process.parent.ppid`](#common-process-ppid-doc) | Parent process ID | | [`process.parent.user`](#common-process-user-doc) | User name | @@ -133,11 +134,11 @@ A file was created | Property | Definition | | -------- | ------------- | | [`create.file.device_path`](#common-fimfileevent-device_path-doc) | File's path | -| [`create.file.device_path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`create.file.device_path.length`](#common-string-length-doc) | Length of the corresponding element | | [`create.file.name`](#common-fimfileevent-name-doc) | File's basename | -| [`create.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`create.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`create.file.path`](#common-fimfileevent-path-doc) | File's path | -| [`create.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`create.file.path.length`](#common-string-length-doc) | Length of the corresponding element | ### Event `create_key` @@ -146,13 +147,13 @@ A registry key was created | Property | Definition | | -------- | ------------- | | [`create.registry.key_name`](#common-registryevent-key_name-doc) | Registry's name | -| 
[`create.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`create.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding element | | [`create.registry.key_path`](#common-registryevent-key_path-doc) | Registry's path | -| [`create.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`create.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding element | | [`create_key.registry.key_name`](#common-registryevent-key_name-doc) | Registry's name | -| [`create_key.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`create_key.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding element | | [`create_key.registry.key_path`](#common-registryevent-key_path-doc) | Registry's path | -| [`create_key.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`create_key.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding element | ### Event `delete` @@ -161,11 +162,11 @@ A file was deleted | Property | Definition | | -------- | ------------- | | [`delete.file.device_path`](#common-fimfileevent-device_path-doc) | File's path | -| [`delete.file.device_path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`delete.file.device_path.length`](#common-string-length-doc) | Length of the corresponding element | | [`delete.file.name`](#common-fimfileevent-name-doc) | File's basename | -| [`delete.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`delete.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`delete.file.path`](#common-fimfileevent-path-doc) | File's path | -| [`delete.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`delete.file.path.length`](#common-string-length-doc) | Length of the corresponding element | ### Event `delete_key` @@ -174,13 +175,13 @@ A registry key was deleted | Property | Definition | | -------- | ------------- | | [`delete.registry.key_name`](#common-registryevent-key_name-doc) | Registry's name | -| [`delete.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`delete.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding element | | [`delete.registry.key_path`](#common-registryevent-key_path-doc) | Registry's path | -| [`delete.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`delete.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding element | | [`delete_key.registry.key_name`](#common-registryevent-key_name-doc) | Registry's name | -| [`delete_key.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`delete_key.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding element | | [`delete_key.registry.key_path`](#common-registryevent-key_path-doc) | Registry's path | -| [`delete_key.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`delete_key.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding element | ### Event `exec` @@ -194,9 +195,9 @@ A process was executed or forked | [`exec.envp`](#common-process-envp-doc) | Environment variables of the 
process | | [`exec.envs`](#common-process-envs-doc) | Environment variable names of the process | | [`exec.file.name`](#common-fileevent-name-doc) | File's basename | -| [`exec.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`exec.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`exec.file.path`](#common-fileevent-path-doc) | File's path | -| [`exec.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`exec.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`exec.pid`](#common-pidcontext-pid-doc) | Process ID of the process (also called thread group ID) | | [`exec.ppid`](#common-process-ppid-doc) | Parent process ID | | [`exec.user`](#common-process-user-doc) | User name | @@ -216,9 +217,9 @@ A process was terminated | [`exit.envp`](#common-process-envp-doc) | Environment variables of the process | | [`exit.envs`](#common-process-envs-doc) | Environment variable names of the process | | [`exit.file.name`](#common-fileevent-name-doc) | File's basename | -| [`exit.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`exit.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`exit.file.path`](#common-fileevent-path-doc) | File's path | -| [`exit.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`exit.file.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`exit.pid`](#common-pidcontext-pid-doc) | Process ID of the process (also called thread group ID) | | [`exit.ppid`](#common-process-ppid-doc) | Parent process ID | | [`exit.user`](#common-process-user-doc) | User name | @@ -231,13 +232,13 @@ A registry key was opened | Property | Definition | | -------- | ------------- | | [`open.registry.key_name`](#common-registryevent-key_name-doc) | Registry's name | -| [`open.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`open.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding element | | [`open.registry.key_path`](#common-registryevent-key_path-doc) | Registry's path | -| [`open.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`open.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding element | | [`open_key.registry.key_name`](#common-registryevent-key_name-doc) | Registry's name | -| [`open_key.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`open_key.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding element | | [`open_key.registry.key_path`](#common-registryevent-key_path-doc) | Registry's path | -| [`open_key.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`open_key.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding element | ### Event `rename` @@ -246,17 +247,17 @@ A file was renamed | Property | Definition | | -------- | ------------- | | [`rename.file.destination.device_path`](#common-fimfileevent-device_path-doc) | File's path | -| [`rename.file.destination.device_path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`rename.file.destination.device_path.length`](#common-string-length-doc) | Length of the corresponding element | | 
[`rename.file.destination.name`](#common-fimfileevent-name-doc) | File's basename | -| [`rename.file.destination.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`rename.file.destination.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`rename.file.destination.path`](#common-fimfileevent-path-doc) | File's path | -| [`rename.file.destination.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`rename.file.destination.path.length`](#common-string-length-doc) | Length of the corresponding element | | [`rename.file.device_path`](#common-fimfileevent-device_path-doc) | File's path | -| [`rename.file.device_path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`rename.file.device_path.length`](#common-string-length-doc) | Length of the corresponding element | | [`rename.file.name`](#common-fimfileevent-name-doc) | File's basename | -| [`rename.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`rename.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`rename.file.path`](#common-fimfileevent-path-doc) | File's path | -| [`rename.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`rename.file.path.length`](#common-string-length-doc) | Length of the corresponding element | ### Event `set_key_value` @@ -265,18 +266,18 @@ A registry key value was set | Property | Definition | | -------- | ------------- | | [`set.registry.key_name`](#common-registryevent-key_name-doc) | Registry's name | -| [`set.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`set.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding element | | [`set.registry.key_path`](#common-registryevent-key_path-doc) | Registry's path | -| [`set.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`set.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding element | | [`set.registry.value_name`](#common-setregistrykeyvalueevent-registry-value_name-doc) | Registry's value name | -| [`set.registry.value_name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`set.registry.value_name.length`](#common-string-length-doc) | Length of the corresponding element | | [`set.value_name`](#common-setregistrykeyvalueevent-value_name-doc) | Registry's value name | | [`set_key_value.registry.key_name`](#common-registryevent-key_name-doc) | Registry's name | -| [`set_key_value.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`set_key_value.registry.key_name.length`](#common-string-length-doc) | Length of the corresponding element | | [`set_key_value.registry.key_path`](#common-registryevent-key_path-doc) | Registry's path | -| [`set_key_value.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`set_key_value.registry.key_path.length`](#common-string-length-doc) | Length of the corresponding element | | [`set_key_value.registry.value_name`](#common-setregistrykeyvalueevent-registry-value_name-doc) | Registry's value name | -| [`set_key_value.registry.value_name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`set_key_value.registry.value_name.length`](#common-string-length-doc) | Length of the corresponding element | | 
[`set_key_value.value_name`](#common-setregistrykeyvalueevent-value_name-doc) | Registry's value name | ### Event `write` @@ -286,11 +287,11 @@ A file was written | Property | Definition | | -------- | ------------- | | [`write.file.device_path`](#common-fimfileevent-device_path-doc) | File's path | -| [`write.file.device_path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`write.file.device_path.length`](#common-string-length-doc) | Length of the corresponding element | | [`write.file.name`](#common-fimfileevent-name-doc) | File's basename | -| [`write.file.name.length`](#common-string-length-doc) | Length of the corresponding string | +| [`write.file.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`write.file.path`](#common-fimfileevent-path-doc) | File's path | -| [`write.file.path.length`](#common-string-length-doc) | Length of the corresponding string | +| [`write.file.path.length`](#common-string-length-doc) | Length of the corresponding element | ## Attributes documentation @@ -397,10 +398,10 @@ Definition: Registry's path ### `*.length` {#common-string-length-doc} Type: int -Definition: Length of the corresponding string +Definition: Length of the corresponding element -`*.length` has 43 possible prefixes: -`create.file.device_path` `create.file.name` `create.file.path` `create.registry.key_name` `create.registry.key_path` `create_key.registry.key_name` `create_key.registry.key_path` `delete.file.device_path` `delete.file.name` `delete.file.path` `delete.registry.key_name` `delete.registry.key_path` `delete_key.registry.key_name` `delete_key.registry.key_path` `exec.file.name` `exec.file.path` `exit.file.name` `exit.file.path` `open.registry.key_name` `open.registry.key_path` `open_key.registry.key_name` `open_key.registry.key_path` `process.ancestors.file.name` `process.ancestors.file.path` `process.file.name` `process.file.path` `process.parent.file.name` `process.parent.file.path` `rename.file.destination.device_path` `rename.file.destination.name` `rename.file.destination.path` `rename.file.device_path` `rename.file.name` `rename.file.path` `set.registry.key_name` `set.registry.key_path` `set.registry.value_name` `set_key_value.registry.key_name` `set_key_value.registry.key_path` `set_key_value.registry.value_name` `write.file.device_path` `write.file.name` `write.file.path` +`*.length` has 44 possible prefixes: +`create.file.device_path` `create.file.name` `create.file.path` `create.registry.key_name` `create.registry.key_path` `create_key.registry.key_name` `create_key.registry.key_path` `delete.file.device_path` `delete.file.name` `delete.file.path` `delete.registry.key_name` `delete.registry.key_path` `delete_key.registry.key_name` `delete_key.registry.key_path` `exec.file.name` `exec.file.path` `exit.file.name` `exit.file.path` `open.registry.key_name` `open.registry.key_path` `open_key.registry.key_name` `open_key.registry.key_path` `process.ancestors` `process.ancestors.file.name` `process.ancestors.file.path` `process.file.name` `process.file.path` `process.parent.file.name` `process.parent.file.path` `rename.file.destination.device_path` `rename.file.destination.name` `rename.file.destination.path` `rename.file.device_path` `rename.file.name` `rename.file.path` `set.registry.key_name` `set.registry.key_path` `set.registry.value_name` `set_key_value.registry.key_name` `set_key_value.registry.key_path` `set_key_value.registry.value_name` `write.file.device_path` `write.file.name` `write.file.path` ### 
`*.name` {#common-fileevent-name-doc} diff --git a/flakes.yaml b/flakes.yaml index a3d6f71ed04a2..d4cc7c24329d1 100644 --- a/flakes.yaml +++ b/flakes.yaml @@ -11,6 +11,5 @@ test/new-e2e/tests/containers: - TestECSSuite/TestCPU/metric___container.cpu.usage{^ecs_container_name:stress-ng$} - TestEKSSuite/TestCPU/metric___container.cpu.usage{^kube_deployment:stress-ng$,^kube_namespace:workload-cpustress$} - TestKindSuite/TestCPU/metric___container.cpu.usage{^kube_deployment:stress-ng$,^kube_namespace:workload-cpustress$} - -test/new-e2e/tests/installer: - - TestPackages/upgrade_scenario_ubuntu_22_04_x86_64/TestUpgradeSuccessful + - TestECSSuite/Test00UpAndRunning/ECS_tasks_are_ready + - TestECSSuite/TestWindowsFargate diff --git a/go.mod b/go.mod index 9b4978e72091d..87481f0ed6861 100644 --- a/go.mod +++ b/go.mod @@ -145,10 +145,10 @@ require ( code.cloudfoundry.org/garden v0.0.0-20210208153517-580cadd489d2 code.cloudfoundry.org/lager v2.0.0+incompatible github.com/CycloneDX/cyclonedx-go v0.8.0 - github.com/DataDog/appsec-internal-go v1.7.0 + github.com/DataDog/appsec-internal-go v1.8.0 github.com/DataDog/datadog-agent/pkg/gohai v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/obfuscate v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0 github.com/DataDog/datadog-agent/pkg/security/secl v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/cgroups v0.56.0-rc.3 @@ -171,7 +171,7 @@ require ( github.com/Masterminds/semver/v3 v3.3.0 github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.6.2 - github.com/Microsoft/hcsshim v0.12.7 + github.com/Microsoft/hcsshim v0.12.8 github.com/acobaugh/osrelease v0.1.0 github.com/alecthomas/participle v0.7.1 // indirect github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 @@ -188,12 +188,12 @@ require ( github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 github.com/cilium/ebpf v0.16.0 github.com/clbanning/mxj v1.8.4 - github.com/containerd/containerd v1.7.21 + github.com/containerd/containerd v1.7.23 github.com/containernetworking/cni v1.2.3 github.com/coreos/go-semver v0.3.1 github.com/coreos/go-systemd v22.5.0+incompatible github.com/cri-o/ocicni v0.4.3 - github.com/cyphar/filepath-securejoin v0.3.3 + github.com/cyphar/filepath-securejoin v0.3.4 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/docker/docker v27.1.1+incompatible github.com/docker/go-connections v0.5.0 @@ -243,7 +243,7 @@ require ( github.com/netsampler/goflow2 v1.3.3 github.com/olekukonko/tablewriter v0.0.5 github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 - github.com/open-policy-agent/opa v0.68.0 + github.com/open-policy-agent/opa v0.69.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.104.0 // indirect github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.0 @@ -252,7 +252,7 @@ require ( github.com/pahanini/go-grpc-bidirectional-streaming-example v0.0.0-20211027164128-cc6111af44be github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.20.2 + github.com/prometheus/client_golang v1.20.4 github.com/prometheus/client_model v0.6.1 github.com/prometheus/procfs v0.15.1 github.com/redis/go-redis/v9 v9.1.0 @@ -298,7 +298,7 @@ require ( go.opentelemetry.io/collector/receiver/otlpreceiver v0.104.0 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect go.uber.org/atomic v1.11.0 - go.uber.org/automaxprocs v1.5.3 + go.uber.org/automaxprocs v1.6.0 go.uber.org/dig v1.18.0 go.uber.org/fx v1.22.2 go.uber.org/multierr v1.11.0 @@ -317,7 +317,7 @@ require ( google.golang.org/grpc v1.67.1 google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a google.golang.org/protobuf v1.35.1 - gopkg.in/DataDog/dd-trace-go.v1 v1.68.0 + gopkg.in/DataDog/dd-trace-go.v1 v1.69.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 gopkg.in/zorkian/go-datadog-api.v2 v2.30.0 @@ -364,7 +364,7 @@ require ( github.com/OneOfOne/xxhash v1.2.8 // indirect github.com/ProtonMail/go-crypto v1.1.0-alpha.0 github.com/StackExchange/wmi v1.2.1 // indirect - github.com/agnivade/levenshtein v1.1.1 // indirect + github.com/agnivade/levenshtein v1.2.0 // indirect github.com/aquasecurity/go-gem-version v0.0.0-20201115065557-8eed6fe000ce // indirect github.com/aquasecurity/go-npm-version v0.0.0-20201110091526-0b796d180798 // indirect github.com/aquasecurity/go-pep440-version v0.0.0-20210121094942-22b2f8951d46 // indirect @@ -604,7 +604,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/defaultpaths v0.0.0-00010101000000-000000000000 github.com/NVIDIA/go-nvml v0.12.4-0 github.com/containerd/containerd/api v1.7.19 - github.com/containerd/errdefs v0.1.0 + github.com/containerd/errdefs v0.3.0 github.com/distribution/reference v0.6.0 github.com/jellydator/ttlcache/v3 v3.3.0 github.com/kouhin/envflag v0.0.0-20150818174321-0e9a86061649 @@ -705,7 +705,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/uuid v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.1 github.com/DataDog/datadog-agent/pkg/version v0.57.0 - github.com/DataDog/go-libddwaf/v3 v3.3.0 + github.com/DataDog/go-libddwaf/v3 v3.4.0 github.com/DataDog/go-sqllexer v0.0.16 github.com/Datadog/dublin-traceroute v0.0.2 github.com/aquasecurity/trivy v0.49.2-0.20240227072422-e1ea02c7b80d diff --git a/go.sum b/go.sum index 08658fcc4533c..388b5cc567818 100644 --- a/go.sum +++ b/go.sum @@ -681,8 +681,8 @@ github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7Oputl github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/DataDog/agent-payload/v5 v5.0.132 h1:F9wy+iyAgN2QmkEsOlPp3RrQ4vOb4T6k3BXhjSpELS4= github.com/DataDog/agent-payload/v5 v5.0.132/go.mod h1:FgVQKmVdqdmZTbxIptqJC/l+xEzdiXsaAOs/vGAvWzs= -github.com/DataDog/appsec-internal-go v1.7.0 h1:iKRNLih83dJeVya3IoUfK+6HLD/hQsIbyBlfvLmAeb0= -github.com/DataDog/appsec-internal-go v1.7.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g= +github.com/DataDog/appsec-internal-go v1.8.0 h1:1Tfn3LEogntRqZtf88twSApOCAAO3V+NILYhuQIo4J4= +github.com/DataDog/appsec-internal-go v1.8.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g= github.com/DataDog/aptly v1.5.3 h1:oLsRvjuXSVM4ia0N83dU3KiQeiJ6BaszYbTZOkSfDlw= github.com/DataDog/aptly v1.5.3/go.mod h1:ZL5TfCso+z4enH03N+s3z8tYUJHhL6DlxIvnnP2TbY4= github.com/DataDog/cast v1.3.1-0.20190301154711-1ee8c8bd14a3 h1:SobA9WYm4K/MUtWlbKaomWTmnuYp1KhIm8Wlx3vmpsg= @@ -704,8 +704,8 @@ github.com/DataDog/extendeddaemonset v0.10.0-rc.4 h1:m88E+emuRHIqKgi7kHMd9N0S/Nt github.com/DataDog/extendeddaemonset v0.10.0-rc.4/go.mod h1:uqO05mxbfqPQ8cghfYN2Uhy/eUv4ZEx89cb4P1Z1uz4= github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302-b9cf785c02fe h1:RO40ywnX/vZLi4Pb4jRuFGgQQBYGIIoQ6u+P2MIgFOA= 
github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302-b9cf785c02fe/go.mod h1:90sqV0j7E8wYCyqIp5d9HmYWLTFQttqPFFtNYDyAybQ= -github.com/DataDog/go-libddwaf/v3 v3.3.0 h1:jS72fuQpFgJZEdEJDmHJCPAgNTEMZoz1EUvimPUOiJ4= -github.com/DataDog/go-libddwaf/v3 v3.3.0/go.mod h1:Bz/0JkpGf689mzbUjKJeheJINqsyyhM8p9PDuHdK2Ec= +github.com/DataDog/go-libddwaf/v3 v3.4.0 h1:NJ2W2vhYaOm1OWr1LJCbdgp7ezG/XLJcQKBmjFwhSuM= +github.com/DataDog/go-libddwaf/v3 v3.4.0/go.mod h1:n98d9nZ1gzenRSk53wz8l6d34ikxS+hs62A31Fqmyi4= github.com/DataDog/go-sqllexer v0.0.16 h1:RoSUMS6MECyB3gTUIdydzXwK5NhEhv6GMJkS7ptsgRA= github.com/DataDog/go-sqllexer v0.0.16/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= @@ -777,8 +777,8 @@ github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpz github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Microsoft/hcsshim v0.12.7 h1:MP6R1spmjxTE4EU4J3YsrTxn8CjvN9qwjTKJXldFaRg= -github.com/Microsoft/hcsshim v0.12.7/go.mod h1:HPbAuJ9BvQYYZbB4yEQcyGIsTP5L4yHKeO9XO149AEM= +github.com/Microsoft/hcsshim v0.12.8 h1:BtDWYlFMcWhorrvSSo2M7z0csPdw6t7no/C3FsSvqiI= +github.com/Microsoft/hcsshim v0.12.8/go.mod h1:cibQ4BqhJ32FXDwPdQhKhwrwophnh3FuT4nwQZF907w= github.com/NVIDIA/go-nvml v0.12.4-0 h1:4tkbB3pT1O77JGr0gQ6uD8FrsUPqP1A/EOEm2wI1TUg= github.com/NVIDIA/go-nvml v0.12.4-0/go.mod h1:8Llmj+1Rr+9VGGwZuRer5N/aCjxGuR5nPb/9ebBiIEQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -802,8 +802,8 @@ github.com/acobaugh/osrelease v0.1.0 h1:Yb59HQDGGNhCj4suHaFQQfBps5wyoKLSSX/J/+Ui github.com/acobaugh/osrelease v0.1.0/go.mod h1:4bFEs0MtgHNHBrmHCt67gNisnabCRAlzdVasCEGHTWY= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= -github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= +github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY= +github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -1064,14 +1064,14 @@ github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+Bu github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= -github.com/containerd/containerd v1.7.21 h1:USGXRK1eOC/SX0L195YgxTHb0a00anxajOzgfN0qrCA= -github.com/containerd/containerd v1.7.21/go.mod h1:e3Jz1rYRUZ2Lt51YrH9Rz0zPyJBOlSvB3ghr2jbVD8g= +github.com/containerd/containerd v1.7.23 
h1:H2CClyUkmpKAGlhQp95g2WXHfLYc7whAuvZGBNYOOwQ= +github.com/containerd/containerd v1.7.23/go.mod h1:7QUzfURqZWCZV7RLNEn1XjUCQLEf0bkaK4GjUaZehxw= github.com/containerd/containerd/api v1.7.19 h1:VWbJL+8Ap4Ju2mx9c9qS1uFSB1OVYr5JJrW2yT5vFoA= github.com/containerd/containerd/api v1.7.19/go.mod h1:fwGavl3LNwAV5ilJ0sbrABL44AQxmNjDRcwheXDb6Ig= github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= -github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= -github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= +github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= +github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY= github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -1116,8 +1116,8 @@ github.com/cri-o/ocicni v0.4.3 h1:BfnrZrtr/F+o+b+yOguB1o6I4OzjieF3k3dN4MrsCJA= github.com/cri-o/ocicni v0.4.3/go.mod h1:RzIKSln5AT65hyyfGj3/gsfCpjiY1Y6rVK51Uc5YNzk= github.com/csaf-poc/csaf_distribution/v3 v3.0.0 h1:ob9+Fmpff0YWgTP3dYaw7G2hKQ9cegh9l3zksc+q3sM= github.com/csaf-poc/csaf_distribution/v3 v3.0.0/go.mod h1:uilCTiNKivq+6zrDvjtZaUeLk70oe21iwKivo6ILwlQ= -github.com/cyphar/filepath-securejoin v0.3.3 h1:lofZkCEVFIBe0KcdQOzFs8Soy9oaHOWl4gGtPI+gCFc= -github.com/cyphar/filepath-securejoin v0.3.3/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM= +github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8= +github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM= github.com/datadog/trivy-db v0.0.0-20240228172000-42caffdaee3f h1:IFB3J+f0m2e7nZjPTqvzLrrb6dVU6BQrsGx/7Tmm8Xk= github.com/datadog/trivy-db v0.0.0-20240228172000-42caffdaee3f/go.mod h1:cj9/QmD9N3OZnKQMp+/DvdV+ym3HyIkd4e+F0ZM3ZGs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -1138,8 +1138,8 @@ github.com/dgryski/go-jump v0.0.0-20211018200510-ba001c3ffce0 h1:0wH6nO9QEa02Qx8 github.com/dgryski/go-jump v0.0.0-20211018200510-ba001c3ffce0/go.mod h1:4hKCXuwrJoYvHZxJ86+bRVTOMyJ0Ej+RqfSm8mHi6KA= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= -github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= +github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo= +github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqYI5SU= github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= @@ -2060,8 +2060,8 @@ github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3 github.com/onsi/gomega v1.27.6/go.mod 
h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= -github.com/open-policy-agent/opa v0.68.0 h1:Jl3U2vXRjwk7JrHmS19U3HZO5qxQRinQbJ2eCJYSqJQ= -github.com/open-policy-agent/opa v0.68.0/go.mod h1:5E5SvaPwTpwt2WM177I9Z3eT7qUpmOGjk1ZdHs+TZ4w= +github.com/open-policy-agent/opa v0.69.0 h1:s2igLw2Z6IvGWGuXSfugWkVultDMsM9pXiDuMp7ckWw= +github.com/open-policy-agent/opa v0.69.0/go.mod h1:+qyXJGkpEJ6kpB1kGo8JSwHtVXbTdsGdQYPWWNYNj+4= github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0 h1:Kpfqjwp+nlgqacXkSS8T8iGiTMTFo8NoT8AoRomDOpU= github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0/go.mod h1:ymbGC/jEXTq8mgHsxzV1PjVGHmV5hSQXmkYkFfGfuLw= github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.104.0 h1:6dvpPt8pCcV+TfMnnanFk2NQYf9HN1voSS9iIHdW+L8= @@ -2773,8 +2773,8 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= @@ -3631,8 +3631,8 @@ google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -gopkg.in/DataDog/dd-trace-go.v1 v1.68.0 h1:8WPoOHJcMAtcxTVKM0DYnFweBjxxfNit3Sjo/rf+Hkw= -gopkg.in/DataDog/dd-trace-go.v1 v1.68.0/go.mod h1:mkZpWVLO/ERW5NqlW+w5d8waQKNvMSTUQLJfoI0vlvw= +gopkg.in/DataDog/dd-trace-go.v1 v1.69.0 h1:zSY6DDsFRMQDNQYKWCv/AEwJXoPpDf1FfMyw7I1B7M8= +gopkg.in/DataDog/dd-trace-go.v1 v1.69.0/go.mod h1:U9AOeBHNAL95JXcd/SPf4a7O5GNeF/yD13sJtli/yaU= gopkg.in/Knetic/govaluate.v3 v3.0.0 h1:18mUyIt4ZlRlFZAAfVetz4/rzlJs9yhN+U02F4u1AOc= gopkg.in/Knetic/govaluate.v3 v3.0.0/go.mod h1:csKLBORsPbafmSCGTEh3U7Ozmsuq8ZSIlKk1bcqph0E= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= diff --git a/pkg/aggregator/demultiplexer.go b/pkg/aggregator/demultiplexer.go index 5a9b76c2b5811..7990ce4973e37 100644 --- a/pkg/aggregator/demultiplexer.go +++ b/pkg/aggregator/demultiplexer.go @@ -10,6 +10,7 @@ import ( pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" + "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" agentruntime "github.com/DataDog/datadog-agent/pkg/runtime" @@ -94,19 +95,23 @@ func createIterableMetrics( serializer serializer.MetricSerializer, logPayloads 
bool,
 	isServerless bool,
+	hostTagProvider *HostTagProvider,
 ) (*metrics.IterableSeries, *metrics.IterableSketches) {
 	var series *metrics.IterableSeries
 	var sketches *metrics.IterableSketches
-
+	hostTags := hostTagProvider.GetHostTags()
 	if serializer.AreSeriesEnabled() {
 		series = metrics.NewIterableSeries(func(se *metrics.Serie) {
 			if logPayloads {
 				log.Debugf("Flushing serie: %s", se)
 			}
+
+			if hostTags != nil {
+				se.Tags = tagset.CombineCompositeTagsAndSlice(se.Tags, hostTags)
+			}
 			tagsetTlm.updateHugeSerieTelemetry(se)
 		}, flushAndSerializeInParallel.BufferSize, flushAndSerializeInParallel.ChannelSize)
 	}
-
 	if serializer.AreSketchesEnabled() {
 		sketches = metrics.NewIterableSketches(func(sketch *metrics.SketchSeries) {
 			if logPayloads {
@@ -115,6 +120,9 @@ func createIterableMetrics(
 			if isServerless {
 				log.DebugfServerless("Sending sketches payload : %s", sketch.String())
 			}
+			if hostTags != nil {
+				sketch.Tags = tagset.CombineCompositeTagsAndSlice(sketch.Tags, hostTags)
+			}
 			tagsetTlm.updateHugeSketchesTelemetry(sketch)
 		}, flushAndSerializeInParallel.BufferSize, flushAndSerializeInParallel.ChannelSize)
 	}
diff --git a/pkg/aggregator/demultiplexer_agent.go b/pkg/aggregator/demultiplexer_agent.go
index fcd44e572e45c..914e544e91c2b 100644
--- a/pkg/aggregator/demultiplexer_agent.go
+++ b/pkg/aggregator/demultiplexer_agent.go
@@ -63,6 +63,8 @@ type AgentDemultiplexer struct {
 	senders *senders

+	hostTagProvider *HostTagProvider
+
 	// sharded statsd time samplers
 	statsd
 }
@@ -159,7 +161,6 @@ func initAgentDemultiplexer(
 	bufferSize := pkgconfigsetup.Datadog().GetInt("aggregator_buffer_size")
 	metricSamplePool := metrics.NewMetricSamplePool(MetricSamplePoolBatchSize, utils.IsTelemetryEnabled(pkgconfigsetup.Datadog()))
-
 	_, statsdPipelinesCount := GetDogStatsDWorkerAndPipelineCount()
 	log.Debug("the Demultiplexer will use", statsdPipelinesCount, "pipelines")
@@ -190,7 +191,6 @@ func initAgentDemultiplexer(
 	}

 	// --
-
 	demux := &AgentDemultiplexer{
 		log:     log,
 		options: options,
@@ -208,7 +208,8 @@ func initAgentDemultiplexer(
 			noAggSerializer: noAggSerializer,
 		},

-		senders: newSenders(agg),
+		hostTagProvider: NewHostTagProvider(),
+		senders:         newSenders(agg),

 		// statsd time samplers
 		statsd: statsd{
@@ -400,8 +401,7 @@ func (d *AgentDemultiplexer) flushToSerializer(start time.Time, waitForSerialize
 	}

 	logPayloads := pkgconfigsetup.Datadog().GetBool("log_payloads")
-	series, sketches := createIterableMetrics(d.aggregator.flushAndSerializeInParallel, d.sharedSerializer, logPayloads, false)
-
+	series, sketches := createIterableMetrics(d.aggregator.flushAndSerializeInParallel, d.sharedSerializer, logPayloads, false, d.hostTagProvider)
 	metrics.Serialize(
 		series,
 		sketches,
diff --git a/pkg/aggregator/demultiplexer_serverless.go b/pkg/aggregator/demultiplexer_serverless.go
index 97573531f5a9d..686ce5e6c096f 100644
--- a/pkg/aggregator/demultiplexer_serverless.go
+++ b/pkg/aggregator/demultiplexer_serverless.go
@@ -37,6 +37,8 @@ type ServerlessDemultiplexer struct {
 	flushAndSerializeInParallel FlushAndSerializeInParallel

+	hostTagProvider *HostTagProvider
+
 	*senders
 }
@@ -55,14 +57,14 @@ func InitAndStartServerlessDemultiplexer(keysPerDomain map[string][]string, forw
 	statsdWorker := newTimeSamplerWorker(statsdSampler, DefaultFlushInterval, bufferSize, metricSamplePool, flushAndSerializeInParallel, tagsStore)

 	demux := &ServerlessDemultiplexer{
-		log:              logger,
-		forwarder:        forwarder,
-		statsdSampler:    statsdSampler,
-		statsdWorker:     statsdWorker,
-		serializer:       serializer,
-		metricSamplePool: metricSamplePool,
-		flushLock:        &sync.Mutex{},
-
+		log:                         logger,
+		forwarder:                   forwarder,
+		statsdSampler:               statsdSampler,
+		statsdWorker:                statsdWorker,
+		serializer:                  serializer,
+		metricSamplePool:            metricSamplePool,
+		flushLock:                   &sync.Mutex{},
+		hostTagProvider:             NewHostTagProvider(),
 		flushAndSerializeInParallel: flushAndSerializeInParallel,
 	}
@@ -105,7 +107,7 @@ func (d *ServerlessDemultiplexer) ForceFlushToSerializer(start time.Time, waitFo
 	defer d.flushLock.Unlock()

 	logPayloads := pkgconfigsetup.Datadog().GetBool("log_payloads")
-	series, sketches := createIterableMetrics(d.flushAndSerializeInParallel, d.serializer, logPayloads, true)
+	series, sketches := createIterableMetrics(d.flushAndSerializeInParallel, d.serializer, logPayloads, true, d.hostTagProvider)

 	metrics.Serialize(
 		series,
diff --git a/pkg/aggregator/host_tag_provider.go b/pkg/aggregator/host_tag_provider.go
new file mode 100644
index 0000000000000..4158756977cc9
--- /dev/null
+++ b/pkg/aggregator/host_tag_provider.go
@@ -0,0 +1,59 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//nolint:revive
+package aggregator
+
+import (
+	"context"
+	"sync"
+
+	hostMetadataUtils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags"
+	pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
+	"github.com/DataDog/datadog-agent/pkg/util/log"
+
+	"github.com/benbjohnson/clock"
+)
+
+// HostTagProvider caches the host tags that are attached to flushed metrics
+// while the expected_tags_duration window is active.
+type HostTagProvider struct {
+	hostTags []string
+	sync.RWMutex
+}
+
+// NewHostTagProvider returns a HostTagProvider backed by the system clock.
+func NewHostTagProvider() *HostTagProvider {
+	return newHostTagProviderWithClock(clock.New())
+}
+
+func newHostTagProviderWithClock(clock clock.Clock) *HostTagProvider {
+	p := &HostTagProvider{
+		hostTags: nil,
+	}
+
+	duration := pkgconfigsetup.Datadog().GetDuration("expected_tags_duration")
+	if duration > 0 {
+		log.Debugf("Adding host tags to metrics for %v", duration)
+		p.hostTags = append([]string{}, hostMetadataUtils.Get(context.TODO(), false, pkgconfigsetup.Datadog()).System...)
+		expectedTagsDeadline := pkgconfigsetup.StartTime.Add(duration)
+		clock.AfterFunc(expectedTagsDeadline.Sub(clock.Now()), func() {
+			p.Lock()
+			defer p.Unlock()
+			p.hostTags = nil
+			log.Debug("host tags for metrics have expired")
+		})
+	}
+
+	return p
+}
+
+// GetHostTags returns the cached host tags, or nil once they have expired.
+func (p *HostTagProvider) GetHostTags() []string {
+	p.RLock()
+	defer p.RUnlock()
+
+	return p.hostTags
+}
diff --git a/pkg/aggregator/host_tag_provider_test.go b/pkg/aggregator/host_tag_provider_test.go
new file mode 100644
index 0000000000000..b8b56bc50a044
--- /dev/null
+++ b/pkg/aggregator/host_tag_provider_test.go
@@ -0,0 +1,81 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
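+//
+// For reference, a minimal sketch (not part of this patch) of how the flush
+// path consumes the provider, mirroring createIterableMetrics above:
+//
+//	p := NewHostTagProvider()
+//	if hostTags := p.GetHostTags(); hostTags != nil {
+//		serie.Tags = tagset.CombineCompositeTagsAndSlice(serie.Tags, hostTags)
+//	}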
+
+package aggregator
+
+import (
+	"testing"
+	"time"
+
+	"github.com/benbjohnson/clock"
+	"github.com/stretchr/testify/assert"
+
+	configmock "github.com/DataDog/datadog-agent/pkg/config/mock"
+	pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
+)
+
+// TestExpectedTagDurationNotSet checks that no host tags are returned when expected_tags_duration is 0
+func TestExpectedTagDurationNotSet(t *testing.T) {
+
+	mockConfig := configmock.New(t)
+
+	tags := []string{"tag1:value1", "tag2:value2", "tag3:value3"}
+	mockConfig.SetWithoutSource("tags", tags)
+	defer mockConfig.SetWithoutSource("tags", nil)
+
+	// Setting expected_tags_duration to 0 (no host tags should be added)
+	mockConfig.SetWithoutSource("expected_tags_duration", "0")
+
+	p := NewHostTagProvider()
+
+	tagList := p.GetHostTags()
+
+	assert.Equal(t, 0, len(tagList))
+}
+
+// TestHostTagProviderExpectedTags verifies that host tags are returned until the expected duration elapses, and nil afterwards
+func TestHostTagProviderExpectedTags(t *testing.T) {
+	mockConfig := configmock.New(t)
+
+	mockClock := clock.NewMock()
+
+	oldStartTime := pkgconfigsetup.StartTime
+	pkgconfigsetup.StartTime = mockClock.Now()
+	defer func() {
+		pkgconfigsetup.StartTime = oldStartTime
+	}()
+
+	// Define and set the expected tags
+	hosttags := []string{"tag1:value1", "tag2:value2", "tag3:value3"}
+	mockConfig.SetWithoutSource("tags", hosttags)
+	defer mockConfig.SetWithoutSource("tags", nil)
+
+	// Set the expected tags expiration duration to 5 seconds
+	expectedTagsDuration := 5 * time.Second
+	mockConfig.SetWithoutSource("expected_tags_duration", "5s")
+	defer mockConfig.SetWithoutSource("expected_tags_duration", "0")
+
+	p := newHostTagProviderWithClock(mockClock)
+
+	tagList := p.GetHostTags()
+
+	// Verify that the tags are returned correctly before expiration
+	assert.Equal(t, hosttags, tagList)
+
+	// Simulate time passing for the expected duration (5 seconds)
+	mockClock.Add(expectedTagsDuration)
+
+	// Verify that after the expiration time, the tags are no longer returned (nil)
+	assert.Nil(t, p.GetHostTags())
+
+}
diff --git a/pkg/aggregator/no_aggregation_stream_worker.go b/pkg/aggregator/no_aggregation_stream_worker.go
index 38324f7d7dd01..2569067934bd1 100644
--- a/pkg/aggregator/no_aggregation_stream_worker.go
+++ b/pkg/aggregator/no_aggregation_stream_worker.go
@@ -49,6 +49,8 @@ type noAggregationStreamWorker struct {
 	samplesChan chan metrics.MetricSampleBatch
 	stopChan    chan trigger

+	hostTagProvider *HostTagProvider
+
 	logThrottling util.SimpleThrottler
 }
@@ -95,6 +97,7 @@ func newNoAggregationStreamWorker(maxMetricsPerPayload int, _ *metrics.MetricSam
 		stopChan:    make(chan trigger),
 		samplesChan: make(chan metrics.MetricSampleBatch, pkgconfigsetup.Datadog().GetInt("dogstatsd_queue_size")),
+		hostTagProvider: NewHostTagProvider(),

 		// warning for the unsupported metric types should appear maximum 200 times
 		// every 5 minutes.
logThrottling: util.NewSimpleThrottler(200, 5*time.Minute, "Pausing the unsupported metric type warning message for 5m"), @@ -145,7 +148,7 @@ func (w *noAggregationStreamWorker) run() { ticker := time.NewTicker(noAggWorkerStreamCheckFrequency) defer ticker.Stop() logPayloads := pkgconfigsetup.Datadog().GetBool("log_payloads") - w.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false) + w.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false, w.hostTagProvider) stopped := false var stopBlockChan chan struct{} @@ -246,7 +249,7 @@ func (w *noAggregationStreamWorker) run() { break } - w.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false) + w.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false, w.hostTagProvider) } if stopBlockChan != nil { diff --git a/pkg/clusteragent/admission/controllers/webhook/controller_base.go b/pkg/clusteragent/admission/controllers/webhook/controller_base.go index c24ca71148be0..3bdda115d6e0d 100644 --- a/pkg/clusteragent/admission/controllers/webhook/controller_base.go +++ b/pkg/clusteragent/admission/controllers/webhook/controller_base.go @@ -111,7 +111,7 @@ func (c *controllerBase) generateWebhooks(wmeta workloadmeta.Component, pa workl if c.config.isMutationEnabled() { mutatingWebhooks = []Webhook{ configWebhook.NewWebhook(wmeta, injectionFilter, datadogConfig), - tagsfromlabels.NewWebhook(wmeta, injectionFilter), + tagsfromlabels.NewWebhook(wmeta, datadogConfig, injectionFilter), agentsidecar.NewWebhook(datadogConfig), autoscaling.NewWebhook(pa), } diff --git a/pkg/clusteragent/admission/mutate/tagsfromlabels/tags.go b/pkg/clusteragent/admission/mutate/tagsfromlabels/tags.go index 3e3bd2b155029..355430d527c49 100644 --- a/pkg/clusteragent/admission/mutate/tagsfromlabels/tags.go +++ b/pkg/clusteragent/admission/mutate/tagsfromlabels/tags.go @@ -24,11 +24,11 @@ import ( "k8s.io/client-go/dynamic" "github.com/DataDog/datadog-agent/cmd/cluster-agent/admission" + "github.com/DataDog/datadog-agent/comp/core/config" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/common" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/metrics" mutatecommon "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -55,14 +55,14 @@ type Webhook struct { } // NewWebhook returns a new Webhook -func NewWebhook(wmeta workloadmeta.Component, injectionFilter mutatecommon.InjectionFilter) *Webhook { +func NewWebhook(wmeta workloadmeta.Component, datadogConfig config.Component, injectionFilter mutatecommon.InjectionFilter) *Webhook { return &Webhook{ name: webhookName, - isEnabled: pkgconfigsetup.Datadog().GetBool("admission_controller.inject_tags.enabled"), - endpoint: pkgconfigsetup.Datadog().GetString("admission_controller.inject_tags.endpoint"), + isEnabled: datadogConfig.GetBool("admission_controller.inject_tags.enabled"), + endpoint: datadogConfig.GetString("admission_controller.inject_tags.endpoint"), resources: []string{"pods"}, operations: []admissionregistrationv1.OperationType{admissionregistrationv1.Create}, - ownerCacheTTL: ownerCacheTTL(), + ownerCacheTTL: 
ownerCacheTTL(datadogConfig), wmeta: wmeta, injectionFilter: injectionFilter, } @@ -273,10 +273,10 @@ func (w *Webhook) getAndCacheOwner(info *ownerInfo, ns string, dc dynamic.Interf return owner, nil } -func ownerCacheTTL() time.Duration { - if pkgconfigsetup.Datadog().IsSet("admission_controller.pod_owners_cache_validity") { // old option. Kept for backwards compatibility - return pkgconfigsetup.Datadog().GetDuration("admission_controller.pod_owners_cache_validity") * time.Minute +func ownerCacheTTL(datadogConfig config.Component) time.Duration { + if datadogConfig.IsSet("admission_controller.pod_owners_cache_validity") { // old option. Kept for backwards compatibility + return datadogConfig.GetDuration("admission_controller.pod_owners_cache_validity") * time.Minute } - return pkgconfigsetup.Datadog().GetDuration("admission_controller.inject_tags.pod_owners_cache_validity") * time.Minute + return datadogConfig.GetDuration("admission_controller.inject_tags.pod_owners_cache_validity") * time.Minute } diff --git a/pkg/clusteragent/admission/mutate/tagsfromlabels/tags_test.go b/pkg/clusteragent/admission/mutate/tagsfromlabels/tags_test.go index dbe96b72504bb..f18a990e27472 100644 --- a/pkg/clusteragent/admission/mutate/tagsfromlabels/tags_test.go +++ b/pkg/clusteragent/admission/mutate/tagsfromlabels/tags_test.go @@ -20,6 +20,7 @@ import ( kscheme "k8s.io/client-go/kubernetes/scheme" "github.com/DataDog/datadog-agent/comp/core" + "github.com/DataDog/datadog-agent/comp/core/config" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/autoinstrumentation" @@ -171,9 +172,10 @@ func Test_injectTags(t *testing.T) { }, } wmeta := fxutil.Test[workloadmeta.Component](t, core.MockBundle(), workloadmetafxmock.MockModule(workloadmeta.NewParams())) + datadogConfig := fxutil.Test[config.Component](t, core.MockBundle()) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - webhook := NewWebhook(wmeta, autoinstrumentation.GetInjectionFilter()) + webhook := NewWebhook(wmeta, datadogConfig, autoinstrumentation.GetInjectionFilter()) _, err := webhook.injectTags(tt.pod, "ns", nil) assert.NoError(t, err) assert.Len(t, tt.pod.Spec.Containers, 1) @@ -273,7 +275,8 @@ func TestGetAndCacheOwner(t *testing.T) { kubeObj := newUnstructuredWithSpec(map[string]interface{}{"foo": "bar"}) owner := newOwner(kubeObj) wmeta := fxutil.Test[workloadmeta.Component](t, core.MockBundle(), workloadmetafxmock.MockModule(workloadmeta.NewParams())) - webhook := NewWebhook(wmeta, autoinstrumentation.GetInjectionFilter()) + datadogConfig := fxutil.Test[config.Component](t, core.MockBundle()) + webhook := NewWebhook(wmeta, datadogConfig, autoinstrumentation.GetInjectionFilter()) // Cache hit cache.Cache.Set(ownerInfo.buildID(testNamespace), owner, webhook.ownerCacheTTL) diff --git a/pkg/collector/corechecks/oracle/sql_wrappers.go b/pkg/collector/corechecks/oracle/sql_wrappers.go index a628a076afff0..82b0cb2739e36 100644 --- a/pkg/collector/corechecks/oracle/sql_wrappers.go +++ b/pkg/collector/corechecks/oracle/sql_wrappers.go @@ -16,6 +16,14 @@ import ( ) func selectWrapper[T any](c *Check, s T, sql string, binds ...interface{}) error { + if c.db == nil { + // Reconnect if the connection is lost + // If reconnect fails, return the error + err := reconnectOnConnectionLose(c) + if err != nil { + return err + } + } err := c.db.Select(s, sql, binds...) 
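+	// handleError is expected to tear down c.db on fatal connection errors
+	// (see reconnectOnConnectionError below), which is what makes the
+	// c.db == nil reconnect path above reachable on the next call.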
err = handleError(c, &c.db, err) if err != nil { @@ -25,6 +33,12 @@ func selectWrapper[T any](c *Check, s T, sql string, binds ...interface{}) error } func getWrapper[T any](c *Check, s T, sql string, binds ...interface{}) error { + if c.db == nil { + err := reconnectOnConnectionLose(c) + if err != nil { + return err + } + } err := c.db.Get(s, sql, binds...) err = handleError(c, &c.db, err) if err != nil { @@ -131,3 +145,13 @@ func reconnectOnConnectionError(c *Check, db **sqlx.DB, err error) { closeDatabase(c, *db) } } + +func reconnectOnConnectionLose(c *Check) error { + db, err := c.Connect() + if err != nil { + log.Errorf("%s failed to reconnect %s", c.logPrompt, err) + closeDatabase(c, c.db) + } + c.db = db + return err +} diff --git a/pkg/collector/corechecks/servicediscovery/apm/detect.go b/pkg/collector/corechecks/servicediscovery/apm/detect.go index 7b4df2c0738fc..5c71ed5d08f11 100644 --- a/pkg/collector/corechecks/servicediscovery/apm/detect.go +++ b/pkg/collector/corechecks/servicediscovery/apm/detect.go @@ -38,7 +38,7 @@ const ( Injected Instrumentation = "injected" ) -type detector func(pid int, args []string, envs envs.Variables, contextMap usm.DetectorContextMap) Instrumentation +type detector func(ctx usm.DetectionContext) Instrumentation var ( detectorMap = map[language.Language]detector{ @@ -53,15 +53,15 @@ var ( ) // Detect attempts to detect the type of APM instrumentation for the given service. -func Detect(pid int, args []string, envs envs.Variables, lang language.Language, contextMap usm.DetectorContextMap) Instrumentation { +func Detect(lang language.Language, ctx usm.DetectionContext) Instrumentation { // first check to see if the DD_INJECTION_ENABLED is set to tracer - if isInjected(envs) { + if isInjected(ctx.Envs) { return Injected } // different detection for provided instrumentation for each if detect, ok := detectorMap[lang]; ok { - return detect(pid, args, envs, contextMap) + return detect(ctx) } return None @@ -97,8 +97,8 @@ const ( // goDetector detects APM instrumentation for Go binaries by checking for // the presence of the dd-trace-go symbols in the ELF. This only works for // unstripped binaries. -func goDetector(pid int, _ []string, _ envs.Variables, _ usm.DetectorContextMap) Instrumentation { - exePath := kernel.HostProc(strconv.Itoa(pid), "exe") +func goDetector(ctx usm.DetectionContext) Instrumentation { + exePath := kernel.HostProc(strconv.Itoa(ctx.Pid), "exe") elfFile, err := elf.Open(exePath) if err != nil { @@ -143,8 +143,8 @@ func pythonDetectorFromMapsReader(reader io.Reader) Instrumentation { // For example: // 7aef453fc000-7aef453ff000 rw-p 0004c000 fc:06 7895473 /home/foo/.local/lib/python3.10/site-packages/ddtrace/internal/_encoding.cpython-310-x86_64-linux-gnu.so // 7aef45400000-7aef45459000 r--p 00000000 fc:06 7895588 /home/foo/.local/lib/python3.10/site-packages/ddtrace/internal/datadog/profiling/libdd_wrapper.so -func pythonDetector(pid int, _ []string, _ envs.Variables, _ usm.DetectorContextMap) Instrumentation { - mapsPath := kernel.HostProc(strconv.Itoa(pid), "maps") +func pythonDetector(ctx usm.DetectionContext) Instrumentation { + mapsPath := kernel.HostProc(strconv.Itoa(ctx.Pid), "maps") mapsFile, err := os.Open(mapsPath) if err != nil { return None @@ -173,14 +173,14 @@ func isNodeInstrumented(f fs.File) bool { // To check for APM instrumentation, we try to find a package.json in // the parent directories of the service. If found, we then check for a // `dd-trace` entry to be present. 
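+// For example, a package.json dependency entry like the following would be
+// reported as instrumented (version illustrative):
+//
+//	"dependencies": { "dd-trace": "^5.0.0" }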
-func nodeDetector(_ int, _ []string, _ envs.Variables, contextMap usm.DetectorContextMap) Instrumentation { - pkgJSONPath, ok := contextMap[usm.NodePackageJSONPath] +func nodeDetector(ctx usm.DetectionContext) Instrumentation { + pkgJSONPath, ok := ctx.ContextMap[usm.NodePackageJSONPath] if !ok { log.Debugf("could not get package.json path from context map") return None } - fs, ok := contextMap[usm.ServiceSubFS] + fs, ok := ctx.ContextMap[usm.ServiceSubFS] if !ok { log.Debugf("could not get SubFS for package.json") return None @@ -200,7 +200,7 @@ func nodeDetector(_ int, _ []string, _ envs.Variables, contextMap usm.DetectorCo return None } -func javaDetector(_ int, args []string, envs envs.Variables, _ usm.DetectorContextMap) Instrumentation { +func javaDetector(ctx usm.DetectionContext) Instrumentation { ignoreArgs := map[string]bool{ "-version": true, "-Xshare:dump": true, @@ -208,7 +208,7 @@ func javaDetector(_ int, args []string, envs envs.Variables, _ usm.DetectorConte } // Check simple args on builtIn list. - for _, v := range args { + for _, v := range ctx.Args { if ignoreArgs[v] { return None } @@ -230,7 +230,7 @@ func javaDetector(_ int, args []string, envs envs.Variables, _ usm.DetectorConte "JDPA_OPTS", } for _, name := range toolOptionEnvs { - if val, ok := envs.Get(name); ok { + if val, ok := ctx.Envs.Get(name); ok { if strings.Contains(val, "-javaagent:") && strings.Contains(val, "dd-java-agent.jar") { return Provided } @@ -266,12 +266,12 @@ func dotNetDetectorFromMapsReader(reader io.Reader) Instrumentation { // maps file. Note that this does not work for single-file deployments. // // 785c8a400000-785c8aaeb000 r--s 00000000 fc:06 12762267 /home/foo/.../publish/Datadog.Trace.dll -func dotNetDetector(pid int, _ []string, envs envs.Variables, _ usm.DetectorContextMap) Instrumentation { - if val, ok := envs.Get("CORECLR_ENABLE_PROFILING"); ok && val == "1" { +func dotNetDetector(ctx usm.DetectionContext) Instrumentation { + if val, ok := ctx.Envs.Get("CORECLR_ENABLE_PROFILING"); ok && val == "1" { return Provided } - mapsPath := kernel.HostProc(strconv.Itoa(pid), "maps") + mapsPath := kernel.HostProc(strconv.Itoa(ctx.Pid), "maps") mapsFile, err := os.Open(mapsPath) if err != nil { return None diff --git a/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go b/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go index efe3b291b289b..578e7bd59a7cb 100644 --- a/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go +++ b/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go @@ -94,7 +94,8 @@ func Test_javaDetector(t *testing.T) { } for _, d := range data { t.Run(d.name, func(t *testing.T) { - result := javaDetector(0, d.args, envs.NewVariables(d.envs), nil) + ctx := usm.NewDetectionContext(d.args, envs.NewVariables(d.envs), nil) + result := javaDetector(ctx) if result != d.result { t.Errorf("expected %s got %s", d.result, result) } @@ -131,7 +132,9 @@ func Test_nodeDetector(t *testing.T) { for _, d := range data { t.Run(d.name, func(t *testing.T) { - result := nodeDetector(0, nil, envs.NewVariables(nil), d.contextMap) + ctx := usm.NewDetectionContext(nil, envs.NewVariables(nil), nil) + ctx.ContextMap = d.contextMap + result := nodeDetector(ctx) assert.Equal(t, d.result, result) }) } @@ -231,7 +234,8 @@ func TestDotNetDetector(t *testing.T) { t.Run(test.name, func(t *testing.T) { var result Instrumentation if test.maps == "" { - result = dotNetDetector(0, nil, envs.NewVariables(test.envs), nil) + ctx := usm.NewDetectionContext(nil, 
envs.NewVariables(test.envs), nil) + result = dotNetDetector(ctx) } else { result = dotNetDetectorFromMapsReader(strings.NewReader(test.maps)) } @@ -262,13 +266,16 @@ func TestGoDetector(t *testing.T) { t.Cleanup(func() { _ = cmdWithoutSymbols.Process.Kill() }) - - result := goDetector(os.Getpid(), nil, envs.NewVariables(nil), nil) + ctx := usm.NewDetectionContext(nil, envs.NewVariables(nil), nil) + ctx.Pid = os.Getpid() + result := goDetector(ctx) require.Equal(t, None, result) - result = goDetector(cmdWithSymbols.Process.Pid, nil, envs.NewVariables(nil), nil) + ctx.Pid = cmdWithSymbols.Process.Pid + result = goDetector(ctx) require.Equal(t, Provided, result) - result = goDetector(cmdWithoutSymbols.Process.Pid, nil, envs.NewVariables(nil), nil) + ctx.Pid = cmdWithoutSymbols.Process.Pid + result = goDetector(ctx) require.Equal(t, Provided, result) } diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go index 071087398f9f1..a5d9604df5bb3 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go @@ -342,20 +342,26 @@ func (s *discovery) getServiceInfo(proc *process.Process) (*serviceInfo, error) return nil, err } - contextMap := make(usm.DetectorContextMap) - root := kernel.HostProc(strconv.Itoa(int(proc.Pid)), "root") lang := language.FindInArgs(exe, cmdline) if lang == "" { lang = language.FindUsingPrivilegedDetector(s.privilegedDetector, proc.Pid) } - envs, err := getTargetEnvs(proc) + env, err := getTargetEnvs(proc) if err != nil { return nil, err } - nameMeta := servicediscovery.GetServiceName(cmdline, envs, root, lang, contextMap) - apmInstrumentation := apm.Detect(int(proc.Pid), cmdline, envs, lang, contextMap) + contextMap := make(usm.DetectorContextMap) + contextMap[usm.ServiceProc] = proc + + fs := usm.NewSubDirFS(root) + ctx := usm.NewDetectionContext(cmdline, env, fs) + ctx.Pid = int(proc.Pid) + ctx.ContextMap = contextMap + + nameMeta := servicediscovery.GetServiceName(lang, ctx) + apmInstrumentation := apm.Detect(lang, ctx) return &serviceInfo{ generatedName: nameMeta.Name, diff --git a/pkg/collector/corechecks/servicediscovery/service_detector.go b/pkg/collector/corechecks/servicediscovery/service_detector.go index aed9b77ec98df..10a4db408229f 100644 --- a/pkg/collector/corechecks/servicediscovery/service_detector.go +++ b/pkg/collector/corechecks/servicediscovery/service_detector.go @@ -9,7 +9,6 @@ import ( "slices" "strings" - "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/envs" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" @@ -62,8 +61,7 @@ func fixupMetadata(meta usm.ServiceMetadata, lang language.Language) usm.Service // GetServiceName gets the service name based on the command line arguments and // the list of environment variables. 
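+// Callers now build a usm.DetectionContext first; a minimal sketch, mirroring
+// the impl_linux.go change in this patch:
+//
+//	ctx := usm.NewDetectionContext(cmdline, envs, usm.NewSubDirFS(root))
+//	meta := GetServiceName(lang, ctx)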
-func GetServiceName(cmdline []string, env envs.Variables, root string, lang language.Language, contextMap usm.DetectorContextMap) usm.ServiceMetadata { - fs := usm.NewSubDirFS(root) - meta, _ := usm.ExtractServiceMetadata(cmdline, env, fs, lang, contextMap) +func GetServiceName(lang language.Language, ctx usm.DetectionContext) usm.ServiceMetadata { + meta, _ := usm.ExtractServiceMetadata(lang, ctx) return fixupMetadata(meta, lang) } diff --git a/pkg/collector/corechecks/servicediscovery/usm/jboss.go b/pkg/collector/corechecks/servicediscovery/usm/jboss.go index 242da6b72b3f6..727f840cdefd2 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/jboss.go +++ b/pkg/collector/corechecks/servicediscovery/usm/jboss.go @@ -93,7 +93,7 @@ func newJbossExtractor(ctx DetectionContext) vendorExtractor { // It detects if the instance is standalone or part of a cluster (domain). It returns a slice of jeeDeployment and a bool. // That will be false in case no deployments have been found func (j jbossExtractor) findDeployedApps(domainHome string) ([]jeeDeployment, bool) { - baseDir, ok := extractJavaPropertyFromArgs(j.cxt.args, jbossHomeDirSysProp) + baseDir, ok := extractJavaPropertyFromArgs(j.cxt.Args, jbossHomeDirSysProp) if !ok { log.Debug("jboss: unable to extract the home directory") return nil, false @@ -102,15 +102,15 @@ func (j jbossExtractor) findDeployedApps(domainHome string) ([]jeeDeployment, bo // real life, but the tests do do it. JBoss/WildFly docs imply that this is // normally an absolute path (since it's set to JBOSS_HOME by default and a // lot of other paths are resolved relative to this one). - if cwd, ok := workingDirFromEnvs(j.cxt.envs); ok { + if cwd, ok := workingDirFromEnvs(j.cxt.Envs); ok { baseDir = abs(baseDir, cwd) } - serverName, domainMode := jbossExtractServerName(j.cxt.args) + serverName, domainMode := jbossExtractServerName(j.cxt.Args) if domainMode && len(serverName) == 0 { log.Debug("jboss: domain mode with missing server name") return nil, false } - configFile := jbossExtractConfigFileName(j.cxt.args, domainMode) + configFile := jbossExtractConfigFileName(j.cxt.Args, domainMode) var deployments []jbossServerDeployment var err error if domainMode { diff --git a/pkg/collector/corechecks/servicediscovery/usm/jee.go b/pkg/collector/corechecks/servicediscovery/usm/jee.go index 94d1c3d7f1218..8980a5ef42df1 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/jee.go +++ b/pkg/collector/corechecks/servicediscovery/usm/jee.go @@ -134,7 +134,7 @@ func (je jeeExtractor) resolveAppServer() (serverVendor, string) { var baseDir string // jboss in domain mode does not expose the domain base dir but that path can be derived from the logging configuration var julConfigFile string - for _, a := range je.ctx.args { + for _, a := range je.ctx.Args { if serverHomeHint == unknown { switch { case strings.HasPrefix(a, wlsHomeSysProp): @@ -312,7 +312,7 @@ func (je jeeExtractor) extractServiceNamesForJEEServer() []string { return nil } extractor := extractorCreator(je.ctx) - cwd, ok := workingDirFromEnvs(je.ctx.envs) + cwd, ok := workingDirFromEnvs(je.ctx.Envs) if ok { domainHome = abs(domainHome, cwd) } diff --git a/pkg/collector/corechecks/servicediscovery/usm/nodejs.go b/pkg/collector/corechecks/servicediscovery/usm/nodejs.go index 2a611fb907c76..dd0f31d3986a5 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/nodejs.go +++ b/pkg/collector/corechecks/servicediscovery/usm/nodejs.go @@ -31,7 +31,7 @@ func isJs(path string) bool { func (n nodeDetector) detect(args []string) 
(ServiceMetadata, bool) { skipNext := false - cwd, _ := workingDirFromEnvs(n.ctx.envs) + cwd, _ := workingDirFromEnvs(n.ctx.Envs) for _, a := range args { if skipNext { skipNext = false @@ -96,8 +96,8 @@ func (n nodeDetector) findNameFromNearestPackageJSON(absFilePath string) (string foundServiceName := ok && len(value) > 0 if foundServiceName { // Save package.json path for the instrumentation detector to use. - n.ctx.contextMap[NodePackageJSONPath] = currentFilePath - n.ctx.contextMap[ServiceSubFS] = n.ctx.fs + n.ctx.ContextMap[NodePackageJSONPath] = currentFilePath + n.ctx.ContextMap[ServiceSubFS] = n.ctx.fs } return value, foundServiceName diff --git a/pkg/collector/corechecks/servicediscovery/usm/nodejs_test.go b/pkg/collector/corechecks/servicediscovery/usm/nodejs_test.go index e157545dfd6b9..0fb3c509d72d6 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/nodejs_test.go +++ b/pkg/collector/corechecks/servicediscovery/usm/nodejs_test.go @@ -43,7 +43,7 @@ func TestFindNameFromNearestPackageJSON(t *testing.T) { t.Run(tt.name, func(t *testing.T) { instance := &nodeDetector{ctx: DetectionContext{ fs: NewSubDirFS(full), - contextMap: make(DetectorContextMap), + ContextMap: make(DetectorContextMap), }} value, ok := instance.findNameFromNearestPackageJSON(tt.path) assert.Equal(t, len(tt.expected) > 0, ok) diff --git a/pkg/collector/corechecks/servicediscovery/usm/python.go b/pkg/collector/corechecks/servicediscovery/usm/python.go index ed55e63f7f16f..58e458cc51f44 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/python.go +++ b/pkg/collector/corechecks/servicediscovery/usm/python.go @@ -50,7 +50,7 @@ func (p pythonDetector) detect(args []string) (ServiceMetadata, bool) { } if !shouldSkipArg { - wd, _ := workingDirFromEnvs(p.ctx.envs) + wd, _ := workingDirFromEnvs(p.ctx.Envs) absPath := abs(a, wd) fi, err := fs.Stat(p.ctx.fs, absPath) if err != nil { @@ -114,13 +114,13 @@ func (p pythonDetector) findNearestTopLevel(fp string) string { } func (g gunicornDetector) detect(args []string) (ServiceMetadata, bool) { - if fromEnv, ok := extractEnvVar(g.ctx.envs, gunicornEnvCmdArgs); ok { + if fromEnv, ok := extractEnvVar(g.ctx.Envs, gunicornEnvCmdArgs); ok { name, ok := extractGunicornNameFrom(strings.Split(fromEnv, " ")) if ok { return NewServiceMetadata(name), true } } - if wsgiApp, ok := extractEnvVar(g.ctx.envs, wsgiAppEnv); ok && len(wsgiApp) > 0 { + if wsgiApp, ok := extractEnvVar(g.ctx.Envs, wsgiAppEnv); ok && len(wsgiApp) > 0 { return NewServiceMetadata(parseNameFromWsgiApp(wsgiApp)), true } diff --git a/pkg/collector/corechecks/servicediscovery/usm/ruby.go b/pkg/collector/corechecks/servicediscovery/usm/ruby.go new file mode 100644 index 0000000000000..9c36d112ac99e --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/usm/ruby.go @@ -0,0 +1,103 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+
+package usm
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io/fs"
+	"path"
+	"regexp"
+
+	"github.com/shirou/gopsutil/v3/process"
+
+	"github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+var (
+	moduleRegexp  = regexp.MustCompile(`module\s+([A-Z][a-zA-Z0-9_]*)`)
+	matchFirstCap = regexp.MustCompile("(.)([A-Z][a-z]+)")
+	matchAllCap   = regexp.MustCompile("([a-z0-9])([A-Z])")
+)
+
+type railsDetector struct {
+	ctx DetectionContext
+}
+
+func newRailsDetector(ctx DetectionContext) detector {
+	return &railsDetector{ctx}
+}
+
+// detect checks if the service is a Rails application by looking for a
+// `config/application.rb` file generated by `rails new` when a new Rails
+// project is created. This file should contain a `module` declaration with the
+// application name.
+func (r railsDetector) detect(_ []string) (ServiceMetadata, bool) {
+	procEntry, ok := r.ctx.ContextMap[ServiceProc]
+	if !ok {
+		return ServiceMetadata{}, false
+	}
+	proc, ok := procEntry.(*process.Process)
+	if !ok {
+		log.Errorf("could not get process object in rails detector: got type %T", procEntry)
+		return ServiceMetadata{}, false
+	}
+
+	cwd, err := proc.Cwd()
+	if err != nil {
+		log.Debugf("could not get cwd of process: %s", err)
+		return ServiceMetadata{}, false
+	}
+
+	absFile := path.Join(cwd, "config/application.rb")
+	if _, err := fs.Stat(r.ctx.fs, absFile); err != nil {
+		return ServiceMetadata{}, false
+	}
+
+	name, err := r.findRailsApplicationName(absFile)
+	if err != nil {
+		log.Debugf("could not find Rails application name: %s", err)
+		return ServiceMetadata{}, false
+	}
+
+	return NewServiceMetadata(string(name)), true
+}
+
+// findRailsApplicationName scans the `config/application.rb` file to find the
+// Rails application name.
+func (r railsDetector) findRailsApplicationName(filename string) ([]byte, error) {
+	file, err := r.ctx.fs.Open(filename)
+	if err != nil {
+		return nil, fmt.Errorf("could not open application.rb: %w", err)
+	}
+	defer file.Close()
+
+	reader, err := SizeVerifiedReader(file)
+	if err != nil {
+		return nil, fmt.Errorf("skipping application.rb (%q): %w", filename, err)
+	}
+
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		matches := moduleRegexp.FindSubmatch(scanner.Bytes())
+		if len(matches) >= 2 {
+			return railsUnderscore(matches[1]), nil
+		}
+	}
+
+	// No match found
+	return nil, errors.New("could not find Ruby module name")
+}
+
+// railsUnderscore converts a PascalCasedWord to a snake_cased_word.
+// It keeps uppercase acronyms together when converting (e.g. "HTTPServer" -> "http_server").
+func railsUnderscore(pascalCasedWord []byte) []byte {
+	snake := matchFirstCap.ReplaceAll(pascalCasedWord, []byte("${1}_${2}"))
+	snake = matchAllCap.ReplaceAll(snake, []byte("${1}_${2}"))
+	return bytes.ToLower(snake)
+}
diff --git a/pkg/collector/corechecks/servicediscovery/usm/ruby_test.go b/pkg/collector/corechecks/servicediscovery/usm/ruby_test.go
new file mode 100644
index 0000000000000..e1bc7b221e1e9
--- /dev/null
+++ b/pkg/collector/corechecks/servicediscovery/usm/ruby_test.go
@@ -0,0 +1,118 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
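As a cross-check of the conversion above, the following standalone sketch applies the same two regular expressions (copied from the diff) outside the detector; the expected outputs match the test cases that follow:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same two passes as railsUnderscore above: first split a capital-plus-lowercase
// run from whatever precedes it, then split lower/digit-to-upper boundaries,
// and finally lowercase the result.
var (
	matchFirstCap = regexp.MustCompile("(.)([A-Z][a-z]+)")
	matchAllCap   = regexp.MustCompile("([a-z0-9])([A-Z])")
)

func main() {
	for _, w := range []string{"RailsHello", "HTTPServer", "HTTP2Server"} {
		s := matchFirstCap.ReplaceAllString(w, "${1}_${2}")
		s = matchAllCap.ReplaceAllString(s, "${1}_${2}")
		fmt.Println(strings.ToLower(s)) // rails_hello, http_server, http2_server
	}
}
```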
+
+//go:build linux
+
+package usm
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestGenerateNameFromRailsApplicationRb(t *testing.T) {
+	tests := []struct {
+		name        string
+		path        string
+		expected    string
+		shouldError bool
+	}{
+		{
+			name:        "name is found",
+			path:        "./testdata/ruby/app.rb",
+			expected:    "rails_hello",
+			shouldError: false,
+		},
+		{
+			name:        "name not found",
+			path:        "./testdata/ruby/app_invalid.rb",
+			expected:    "",
+			shouldError: true,
+		},
+		{
+			name:        "acronym in module name",
+			path:        "./testdata/ruby/app_accronym.rb",
+			expected:    "http_server",
+			shouldError: false,
+		},
+		{
+			name:        "file does not exist",
+			path:        "./testdata/ruby/application_does_not_exist.rb",
+			expected:    "",
+			shouldError: true,
+		},
+	}
+	full, err := filepath.Abs("testdata/root")
+	require.NoError(t, err)
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			instance := &railsDetector{ctx: DetectionContext{
+				fs:         NewSubDirFS(full),
+				ContextMap: make(DetectorContextMap),
+			}}
+
+			value, err := instance.findRailsApplicationName(tt.path)
+
+			if tt.shouldError {
+				assert.Error(t, err, "did not get an error when we should have. Got service name: %s", value)
+				assert.Nil(t, value, "got a non-empty result: %s", value)
+				return
+			}
+
+			assert.True(t, len(tt.expected) > 0, "got an empty result")
+			assert.Equal(t, tt.expected, string(value))
+		})
+	}
+}
+
+func TestRailsUnderscore(t *testing.T) {
+	tests := []struct {
+		name     string
+		given    string
+		expected string
+	}{
+		{
+			name:     "one word",
+			given:    "Service",
+			expected: "service",
+		},
+		{
+			name:     "acronym is preserved",
+			given:    "HTTPServer",
+			expected: "http_server",
+		},
+		{
+			name:     "numbers in module name",
+			given:    "HTTP2Server",
+			expected: "http2_server",
+		},
+		{
+			name:     "multiple words",
+			given:    "VeryLongServiceName",
+			expected: "very_long_service_name",
+		},
+		// NOTE: the following cases should never happen in practice
+		{
+			name:     "already snake case",
+			given:    "service_name",
+			expected: "service_name",
+		},
+		{
+			name:     "empty name",
+			given:    "",
+			expected: "",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := railsUnderscore([]byte(tt.given))
+			assert.Equal(t, tt.expected, string(got))
+		})
+	}
+}
diff --git a/pkg/collector/corechecks/servicediscovery/usm/service.go b/pkg/collector/corechecks/servicediscovery/usm/service.go
index f6b6c1948d945..ff8409ba323dc 100644
--- a/pkg/collector/corechecks/servicediscovery/usm/service.go
+++ b/pkg/collector/corechecks/servicediscovery/usm/service.go
@@ -35,6 +35,8 @@ const (
 	NodePackageJSONPath = iota
 	// ServiceSubFS The SubdirFS instance package.json path is valid in.
 	ServiceSubFS = iota
+	// ServiceProc The pointer to the Process instance of the service
+	ServiceProc = iota
 )
 
 const (
@@ -108,17 +110,24 @@ func newDotnetDetector(ctx DetectionContext) detector {
 
 // DetectionContext allows to detect ServiceMetadata.
 type DetectionContext struct {
-	args       []string
-	envs       envs.Variables
-	fs         fs.SubFS
-	contextMap DetectorContextMap
+	// Pid is the PID of the process
+	Pid int
+	// Args holds the command line arguments of the process
+	Args []string
+	// Envs holds the targeted environment variables of the process
+	Envs envs.Variables
+	// fs provides access to a file system
+	fs fs.SubFS
+	// ContextMap is a map used to pass data between detectors, such as paths.
+	ContextMap DetectorContextMap
 }
 
 // NewDetectionContext initializes DetectionContext.
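As the tests above show, the constructor declared next leaves `Pid` zeroed and `ContextMap` nil; detectors such as the Node.js one write into `ContextMap`, so callers that rely on it must allocate it first. A minimal sketch of that pattern, with hypothetical `cmdline`, `envMap`, `fs`, `pid`, and `lang` inputs:

```go
ctx := usm.NewDetectionContext(cmdline, envs.NewVariables(envMap), fs)
ctx.Pid = pid                                 // needed only by pid-based detectors
ctx.ContextMap = make(usm.DetectorContextMap) // needed by detectors that share state
meta, ok := usm.ExtractServiceMetadata(lang, ctx)
```

The constructor itself follows.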
func NewDetectionContext(args []string, envs envs.Variables, fs fs.SubFS) DetectionContext { return DetectionContext{ - args: args, - envs: envs, + Pid: 0, + Args: args, + Envs: envs, fs: fs, } } @@ -181,8 +190,9 @@ var languageDetectors = map[language.Language]detectorCreatorFn{ // Map executables that usually have additional process context of what's // running, to context detectors var executableDetectors = map[string]detectorCreatorFn{ - "sudo": newSimpleDetector, "gunicorn": newGunicornDetector, + "puma": newRailsDetector, + "sudo": newSimpleDetector, } func serviceNameInjected(envs envs.Variables) bool { @@ -198,14 +208,8 @@ func serviceNameInjected(envs envs.Variables) bool { } // ExtractServiceMetadata attempts to detect ServiceMetadata from the given process. -func ExtractServiceMetadata(args []string, envs envs.Variables, fs fs.SubFS, lang language.Language, contextMap DetectorContextMap) (metadata ServiceMetadata, success bool) { - dc := DetectionContext{ - args: args, - envs: envs, - fs: fs, - contextMap: contextMap, - } - cmd := dc.args +func ExtractServiceMetadata(lang language.Language, ctx DetectionContext) (metadata ServiceMetadata, success bool) { + cmd := ctx.Args if len(cmd) == 0 || len(cmd[0]) == 0 { return } @@ -213,9 +217,9 @@ func ExtractServiceMetadata(args []string, envs envs.Variables, fs fs.SubFS, lan // We always return a service name from here on success = true - if value, ok := chooseServiceNameFromEnvs(dc.envs); ok { + if value, ok := chooseServiceNameFromEnvs(ctx.Envs); ok { metadata.DDService = value - metadata.DDServiceInjected = serviceNameInjected(envs) + metadata.DDServiceInjected = serviceNameInjected(ctx.Envs) } exe := cmd[0] @@ -244,7 +248,7 @@ func ExtractServiceMetadata(args []string, envs envs.Variables, fs fs.SubFS, lan } if ok { - langMeta, ok := detectorProvider(dc).detect(cmd[1:]) + langMeta, ok := detectorProvider(ctx).detect(cmd[1:]) // The detector could return a DD Service name (eg. Java, from the // dd.service property), but still fail to generate a service name (ok = diff --git a/pkg/collector/corechecks/servicediscovery/usm/service_test.go b/pkg/collector/corechecks/servicediscovery/usm/service_test.go index ea35911d3a575..6a35749695a5b 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/service_test.go +++ b/pkg/collector/corechecks/servicediscovery/usm/service_test.go @@ -624,7 +624,9 @@ func TestExtractServiceMetadata(t *testing.T) { if tt.fs != nil { fs = *tt.fs } - meta, ok := ExtractServiceMetadata(tt.cmdline, envs.NewVariables(tt.envs), fs, tt.lang, make(DetectorContextMap)) + ctx := NewDetectionContext(tt.cmdline, envs.NewVariables(tt.envs), fs) + ctx.ContextMap = make(DetectorContextMap) + meta, ok := ExtractServiceMetadata(tt.lang, ctx) if len(tt.expectedGeneratedName) == 0 && len(tt.expectedDDService) == 0 { require.False(t, ok) } else { diff --git a/pkg/collector/corechecks/servicediscovery/usm/spring.go b/pkg/collector/corechecks/servicediscovery/usm/spring.go index ccce818dcc1a6..ecc86415b7d26 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/spring.go +++ b/pkg/collector/corechecks/servicediscovery/usm/spring.go @@ -293,7 +293,7 @@ func newSpringBootArchiveSourceFromReader(reader *zip.Reader, patternMap map[str // the jar path and the application arguments. 
// When resolving properties, it supports placeholder resolution (a = ${b} -> will lookup then b) func (s springBootParser) GetSpringBootAppName(jarname string) (string, bool) { - cwd, _ := workingDirFromEnvs(s.ctx.envs) + cwd, _ := workingDirFromEnvs(s.ctx.Envs) absName := abs(jarname, cwd) file, err := s.ctx.fs.Open(absName) if err != nil { @@ -317,9 +317,9 @@ func (s springBootParser) GetSpringBootAppName(jarname string) (string, bool) { log.Debugf("parsing information from spring boot archive: %q", jarname) combined := &props.Combined{Sources: []props.PropertyGetter{ - newArgumentSource(s.ctx.args, "--"), - newArgumentSource(s.ctx.args, "-D"), - newEnvironmentSource(s.ctx.envs), + newArgumentSource(s.ctx.Args, "--"), + newArgumentSource(s.ctx.Args, "-D"), + newEnvironmentSource(s.ctx.Envs), }} // resolved properties referring to other properties (thanks to the Expander) diff --git a/pkg/collector/corechecks/servicediscovery/usm/testdata/root/testdata/ruby/app.rb b/pkg/collector/corechecks/servicediscovery/usm/testdata/root/testdata/ruby/app.rb new file mode 100644 index 0000000000000..6af9567680208 --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/usm/testdata/root/testdata/ruby/app.rb @@ -0,0 +1,38 @@ +require_relative "boot" + +require "rails" +# Pick the frameworks you want: +require "active_model/railtie" +# require "active_job/railtie" +require "active_record/railtie" +# require "active_storage/engine" +require "action_controller/railtie" +# require "action_mailer/railtie" +# require "action_mailbox/engine" +# require "action_text/engine" +require "action_view/railtie" +# require "action_cable/engine" +require "sprockets/railtie" +require "rails/test_unit/railtie" + +# Require the gems listed in Gemfile, including any gems +# you've limited to :test, :development, or :production. +Bundler.require(*Rails.groups) + +module RailsHello + class Application < Rails::Application + # Initialize configuration defaults for originally generated Rails version. + config.load_defaults 6.1 + + # Configuration for the application, engines, and railties goes here. + # + # These settings can be overridden in specific environments using the files + # in config/environments, which are processed later. + # + # config.time_zone = "Central Time (US & Canada)" + # config.eager_load_paths << Rails.root.join("extras") + + # Don't generate system test files. 
+ config.generators.system_tests = nil + end +end diff --git a/pkg/collector/corechecks/servicediscovery/usm/testdata/root/testdata/ruby/app_accronym.rb b/pkg/collector/corechecks/servicediscovery/usm/testdata/root/testdata/ruby/app_accronym.rb new file mode 100644 index 0000000000000..5e33caace3fc1 --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/usm/testdata/root/testdata/ruby/app_accronym.rb @@ -0,0 +1 @@ +module HTTPServer diff --git a/pkg/collector/corechecks/servicediscovery/usm/testdata/root/testdata/ruby/app_invalid.rb b/pkg/collector/corechecks/servicediscovery/usm/testdata/root/testdata/ruby/app_invalid.rb new file mode 100644 index 0000000000000..5120fe0be00a0 --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/usm/testdata/root/testdata/ruby/app_invalid.rb @@ -0,0 +1,2 @@ +class SomeRubyClass +end diff --git a/pkg/collector/corechecks/servicediscovery/usm/weblogic.go b/pkg/collector/corechecks/servicediscovery/usm/weblogic.go index 17c70dcfa1999..2f78b12086f1b 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/weblogic.go +++ b/pkg/collector/corechecks/servicediscovery/usm/weblogic.go @@ -53,7 +53,7 @@ func newWeblogicExtractor(ctx DetectionContext) vendorExtractor { // The args is required here because used to determine the current server name. // it returns paths for staged only applications and bool being true if at least one application is found func (we weblogicExtractor) findDeployedApps(domainHome string) ([]jeeDeployment, bool) { - serverName, ok := extractJavaPropertyFromArgs(we.ctx.args, wlsServerNameSysProp) + serverName, ok := extractJavaPropertyFromArgs(we.ctx.Args, wlsServerNameSysProp) if !ok { return nil, false } diff --git a/pkg/collector/corechecks/servicediscovery/usm/websphere.go b/pkg/collector/corechecks/servicediscovery/usm/websphere.go index 592c525ef0526..c16aa296777e7 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/websphere.go +++ b/pkg/collector/corechecks/servicediscovery/usm/websphere.go @@ -82,11 +82,11 @@ func isApplicationDeployed(fs fs.FS, descriptorPath string, nodeName string, ser // findDeployedApps finds applications that are enabled in a domainHome for the matched cell, node and server // If nothing false, it returns false func (we websphereExtractor) findDeployedApps(domainHome string) ([]jeeDeployment, bool) { - n := len(we.ctx.args) + n := len(we.ctx.Args) if n < 3 { return nil, false } - cellName, nodeName, serverName := we.ctx.args[n-3], we.ctx.args[n-2], we.ctx.args[n-1] + cellName, nodeName, serverName := we.ctx.Args[n-3], we.ctx.Args[n-2], we.ctx.Args[n-1] if len(cellName) == 0 || len(nodeName) == 0 || len(serverName) == 0 { return nil, false } diff --git a/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse.go b/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse.go index c4fb188d2e5d2..38fe8119ba9cd 100644 --- a/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse.go +++ b/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse.go @@ -23,6 +23,7 @@ import ( // allow us to change for testing var readfn = doReadCrashDump +var parseCrashDump = parseWinCrashDump type logCallbackContext struct { loglines []string @@ -107,14 +108,14 @@ func doReadCrashDump(filename string, ctx *logCallbackContext, exterr *uint32) e return nil } -func parseCrashDump(wcs *WinCrashStatus) { +func parseWinCrashDump(wcs *WinCrashStatus) { var ctx logCallbackContext var extendedError uint32 err := readfn(wcs.FileName, &ctx, &extendedError) if err != nil { - wcs.Success = 
false + wcs.StatusCode = WinCrashStatusCodeFailed wcs.ErrString = fmt.Sprintf("Failed to load crash dump file %v %x", err, extendedError) log.Errorf("Failed to open crash dump %s: %v %x", wcs.FileName, err, extendedError) return @@ -122,7 +123,7 @@ func parseCrashDump(wcs *WinCrashStatus) { if len(ctx.loglines) < 2 { wcs.ErrString = fmt.Sprintf("Invalid crash dump file %s", wcs.FileName) - wcs.Success = false + wcs.StatusCode = WinCrashStatusCodeFailed return } @@ -190,5 +191,5 @@ func parseCrashDump(wcs *WinCrashStatus) { wcs.Offender = callsite break } - wcs.Success = true + wcs.StatusCode = WinCrashStatusCodeSuccess } diff --git a/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse_test.go b/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse_test.go index 161b3b6068119..2efd8d5f21781 100644 --- a/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse_test.go +++ b/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse_test.go @@ -51,12 +51,11 @@ func TestCrashParser(t *testing.T) { FileName: "testdata/crashsample1.txt", } // first read in the sample data - - readfn = testCrashReader + OverrideCrashDumpReader(testCrashReader) parseCrashDump(wcs) - assert.True(t, wcs.Success) + assert.Equal(t, WinCrashStatusCodeSuccess, wcs.StatusCode) assert.Empty(t, wcs.ErrString) assert.Equal(t, "Mon Jun 26 20:44:49.742 2023 (UTC - 7:00)", wcs.DateString) before, _, _ := strings.Cut(wcs.Offender, "+") @@ -72,11 +71,11 @@ func TestCrashParserWithLineSplits(t *testing.T) { } // first read in the sample data - readfn = testCrashReaderWithLineSplits + OverrideCrashDumpReader(testCrashReaderWithLineSplits) parseCrashDump(wcs) - assert.True(t, wcs.Success) + assert.Equal(t, WinCrashStatusCodeSuccess, wcs.StatusCode) assert.Empty(t, wcs.ErrString) assert.Equal(t, "Mon Jun 26 20:44:49.742 2023 (UTC - 7:00)", wcs.DateString) before, _, _ := strings.Cut(wcs.Offender, "+") diff --git a/pkg/collector/corechecks/system/wincrashdetect/probe/win_crash_types.go b/pkg/collector/corechecks/system/wincrashdetect/probe/win_crash_types.go index 5fac5c9853b17..a1a36691be08a 100644 --- a/pkg/collector/corechecks/system/wincrashdetect/probe/win_crash_types.go +++ b/pkg/collector/corechecks/system/wincrashdetect/probe/win_crash_types.go @@ -29,10 +29,26 @@ const ( DumpTypeAutomatic = int(7) // automatic ) +const ( + // WinCrashStatusCodeUnknown indicates an invalid or corrupted code. + WinCrashStatusCodeUnknown = int(-1) + + // WinCrashStatusCodeSuccess indicates that crash dump processing succeeded + // or no crash dump was found. + WinCrashStatusCodeSuccess = int(0) + + // WinCrashStatusCodeBusy indicates that crash dump processing is still busy + // and no result is yet available. + WinCrashStatusCodeBusy = int(1) + + // WinCrashStatusCodeFailed indicates that crash dump processing failed or had an error. 
+	WinCrashStatusCodeFailed = int(2)
+)
+
 // WinCrashStatus defines all of the information returned from the system
 // probe to the caller
 type WinCrashStatus struct {
-	Success    bool   `json:"success"`
+	StatusCode int    `json:"statuscode"`
 	ErrString  string `json:"errstring"`
 	FileName   string `json:"filename"`
 	Type       int    `json:"dumptype"`
diff --git a/pkg/collector/corechecks/system/wincrashdetect/probe/wincrash_testutil.go b/pkg/collector/corechecks/system/wincrashdetect/probe/wincrash_testutil.go
new file mode 100644
index 0000000000000..c2f52b0071589
--- /dev/null
+++ b/pkg/collector/corechecks/system/wincrashdetect/probe/wincrash_testutil.go
@@ -0,0 +1,26 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+//go:build test && windows
+
+package probe
+
+type readCrashDumpType func(filename string, ctx *logCallbackContext, _ *uint32) error
+type parseCrashDumpType func(wcs *WinCrashStatus)
+
+// SetCachedSettings sets the settings used for tests without reading the Registry.
+func (p *WinCrashProbe) SetCachedSettings(wcs *WinCrashStatus) {
+	p.status = wcs
+}
+
+// OverrideCrashDumpReader replaces the crash dump reading function for tests.
+func OverrideCrashDumpReader(customCrashReader readCrashDumpType) {
+	readfn = customCrashReader
+}
+
+// OverrideCrashDumpParser replaces the crash dump parsing function for tests.
+func OverrideCrashDumpParser(customParseCrashDump parseCrashDumpType) {
+	parseCrashDump = customParseCrashDump
+}
diff --git a/pkg/collector/corechecks/system/wincrashdetect/probe/wincrashprobe.go b/pkg/collector/corechecks/system/wincrashdetect/probe/wincrashprobe.go
index 1d75c514142df..533cfcc2c431b 100644
--- a/pkg/collector/corechecks/system/wincrashdetect/probe/wincrashprobe.go
+++ b/pkg/collector/corechecks/system/wincrashdetect/probe/wincrashprobe.go
@@ -11,38 +11,126 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"sync"
 
 	sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types"
 	"github.com/DataDog/datadog-agent/pkg/util/winutil"
 	"golang.org/x/sys/windows/registry"
 )
 
+type probeState uint32
+
+const (
+	// idle indicates that the probe is waiting for a request.
+	idle probeState = iota
+
+	// busy indicates that the probe is currently processing a crash dump.
+	busy
+
+	// completed indicates that the probe finished processing a crash dump.
+	completed
+
+	// failed indicates that the probe failed to process a crash dump.
+	failed
+)
+
 // WinCrashProbe has no stored state.
 type WinCrashProbe struct {
+	state  probeState
+	status *WinCrashStatus
+	mu     sync.Mutex
 }
 
 // NewWinCrashProbe returns an initialized WinCrashProbe
 func NewWinCrashProbe(_ *sysconfigtypes.Config) (*WinCrashProbe, error) {
-	return &WinCrashProbe{}, nil
+	return &WinCrashProbe{
+		state:  idle,
+		status: nil,
+	}, nil
+}
+
+// Handles crash dump parsing in a separate goroutine since this may take very long.
+func (p *WinCrashProbe) parseCrashDumpAsync() {
+	if p.status == nil {
+		p.state = failed
+		return
+	}
+
+	parseCrashDump(p.status)
+
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.state = completed
 }
 
 // Get returns the current crash, if any
 func (p *WinCrashProbe) Get() *WinCrashStatus {
 	wcs := &WinCrashStatus{}
 
-	err := wcs.getCurrentCrashSettings()
-	if err != nil {
-		wcs.ErrString = err.Error()
-		wcs.Success = false
-		return wcs
-	}
+	// Nothing in this method should take long.
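Because `Get` now returns immediately with `WinCrashStatusCodeBusy` while `parseCrashDumpAsync` runs in the background, callers are expected to poll until a terminal status is cached. A hypothetical consumer loop (the poll interval is an assumption, not part of the diff):

```go
// Poll the probe until parsing reaches a terminal state; the final
// status (success or failure) is cached for all later calls to Get.
wcs := p.Get()
for wcs.StatusCode == WinCrashStatusCodeBusy {
	time.Sleep(time.Second) // assumed interval; callers choose their own cadence
	wcs = p.Get()
}
```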
+ p.mu.Lock() + defer p.mu.Unlock() + + switch p.state { + case idle: + if p.status == nil { + // This is a new request. + err := wcs.getCurrentCrashSettings() + if err != nil { + wcs.ErrString = err.Error() + wcs.StatusCode = WinCrashStatusCodeFailed + } + } else { + // Use cached settings, set by tests. + // Make a copy to avoid side-effect modifications. + *wcs = *(p.status) + } - if len(wcs.FileName) == 0 { - // no filename means no crash dump - wcs.Success = true // we succeeded - return wcs + // Transition to the next state. + if wcs.StatusCode == WinCrashStatusCodeFailed { + // Only try once and cache the failure. + p.status = wcs + p.state = failed + } else if len(wcs.FileName) == 0 { + // No filename means no crash dump + p.status = wcs + p.state = completed + wcs.StatusCode = WinCrashStatusCodeSuccess + } else { + // Kick off the crash dump processing asynchronously. + // The crash dump may be very large and we should not block for a response. + p.state = busy + wcs.StatusCode = WinCrashStatusCodeBusy + + // Make a new copy of the wcs for async processing while returning "Busy" + // for the current response. + p.status = &WinCrashStatus{ + FileName: wcs.FileName, + Type: wcs.Type, + } + + go p.parseCrashDumpAsync() + } + + case busy: + // The crash dump processing is not done yet. Reply busy. + if p.status != nil { + wcs.FileName = p.status.FileName + wcs.Type = p.status.Type + } + wcs.StatusCode = WinCrashStatusCodeBusy + + case failed: + fallthrough + case completed: + // The crash dump processing was done, return the result. + if p.status != nil { + // This result is cached for all subsequent queries. + wcs = p.status + } else { + wcs.StatusCode = WinCrashStatusCodeFailed + } } - parseCrashDump(wcs) return wcs } diff --git a/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go b/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go index ffb9ce3647dbc..ff1cdb0ced1c8 100644 --- a/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go +++ b/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go @@ -10,11 +10,10 @@ package wincrashdetect import ( "net" "net/http" - - //"strings" + "sync" "testing" + "time" - //"github.com/stretchr/testify/require" "github.com/stretchr/testify/assert" "github.com/DataDog/datadog-agent/cmd/system-probe/utils" @@ -108,7 +107,7 @@ func TestWinCrashReporting(t *testing.T) { // set the return value handled in the check handler above p = &probe.WinCrashStatus{ - Success: true, + StatusCode: probe.WinCrashStatusCodeSuccess, } check := newCheck() @@ -128,7 +127,7 @@ func TestWinCrashReporting(t *testing.T) { testSetup(t) defer testCleanup() p = &probe.WinCrashStatus{ - Success: true, + StatusCode: probe.WinCrashStatusCodeSuccess, FileName: `c:\windows\memory.dmp`, Type: probe.DumpTypeAutomatic, DateString: `Fri Jun 30 15:33:05.086 2023 (UTC - 7:00)`, @@ -201,3 +200,163 @@ func TestWinCrashReporting(t *testing.T) { mock.AssertNumberOfCalls(t, "Commit", 2) }) } + +func TestCrashReportingStates(t *testing.T) { + var crashStatus *probe.WinCrashStatus + + listener, closefunc := createSystemProbeListener() + defer closefunc() + + pkgconfigsetup.InitSystemProbeConfig(pkgconfigsetup.SystemProbe()) + + mux := http.NewServeMux() + server := http.Server{ + Handler: mux, + } + defer server.Close() + + cp, err := probe.NewWinCrashProbe(nil) + assert.NotNil(t, cp) + assert.Nil(t, err) + + wg := sync.WaitGroup{} + + // This will artificially delay the "parsing" to ensure the 
first check gets a "busy" status. + delayedCrashDumpParser := func(wcs *probe.WinCrashStatus) { + time.Sleep(4 * time.Second) + + assert.Equal(t, `c:\windows\memory.dmp`, wcs.FileName) + assert.Equal(t, probe.DumpTypeAutomatic, wcs.Type) + + wcs.StatusCode = probe.WinCrashStatusCodeSuccess + wcs.ErrString = crashStatus.ErrString + wcs.DateString = crashStatus.DateString + wcs.Offender = crashStatus.Offender + wcs.BugCheck = crashStatus.BugCheck + + // Signal that the artificial delay is done. + wg.Done() + } + + // This ensures that no crash dump parsing should happen. + noCrashDumpParser := func(_ *probe.WinCrashStatus) { + assert.FailNow(t, "Should not parse") + } + + mux.Handle("/windows_crash_detection/check", http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) { + results := cp.Get() + utils.WriteAsJSON(rw, results) + })) + mux.Handle("/debug/stats", http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) { + })) + go server.Serve(listener) + + t.Run("test reporting a crash with a busy intermediate state", func(t *testing.T) { + testSetup(t) + defer testCleanup() + + check := newCheck() + crashCheck := check.(*WinCrashDetect) + mock := mocksender.NewMockSender(crashCheck.ID()) + err := crashCheck.Configure(mock.GetSenderManager(), 0, nil, nil, "") + assert.NoError(t, err) + + crashStatus = &probe.WinCrashStatus{ + StatusCode: probe.WinCrashStatusCodeSuccess, + FileName: `c:\windows\memory.dmp`, + Type: probe.DumpTypeAutomatic, + ErrString: "", + DateString: `Fri Jun 30 15:33:05.086 2023 (UTC - 7:00)`, + Offender: `somedriver.sys`, + BugCheck: "0x00000007", + } + + // Test the 2-check response from crash reporting. + cp.SetCachedSettings(crashStatus) + probe.OverrideCrashDumpParser(delayedCrashDumpParser) + + // First run should be "busy" and not return an event yet. + wg.Add(1) + err = crashCheck.Run() + assert.Nil(t, err) + mock.AssertNumberOfCalls(t, "Gauge", 0) + mock.AssertNumberOfCalls(t, "Rate", 0) + mock.AssertNumberOfCalls(t, "Event", 0) + mock.AssertNumberOfCalls(t, "Commit", 0) + + // Wait for the artificial delay to finish, plus a small time buffer. + wg.Wait() + time.Sleep(4 * time.Second) + + expected := event.Event{ + Priority: event.PriorityNormal, + SourceTypeName: CheckName, + EventType: CheckName, + AlertType: event.AlertTypeError, + Title: formatTitle(crashStatus), + Text: formatText(crashStatus), + } + + mock.On("Event", expected).Return().Times(1) + mock.On("Commit").Return().Times(1) + + // The result should be available now. + err = crashCheck.Run() + assert.Nil(t, err) + mock.AssertNumberOfCalls(t, "Gauge", 0) + mock.AssertNumberOfCalls(t, "Rate", 0) + mock.AssertNumberOfCalls(t, "Event", 1) + mock.AssertNumberOfCalls(t, "Commit", 1) + }) + + t.Run("test that no crash is reported", func(t *testing.T) { + testSetup(t) + defer testCleanup() + + check := newCheck() + crashCheck := check.(*WinCrashDetect) + mock := mocksender.NewMockSender(crashCheck.ID()) + err := crashCheck.Configure(mock.GetSenderManager(), 0, nil, nil, "") + assert.NoError(t, err) + + noCrashStatus := &probe.WinCrashStatus{ + StatusCode: probe.WinCrashStatusCodeSuccess, + FileName: "", + } + + // Test finding no crashes. The response should be immediate. 
+		cp.SetCachedSettings(noCrashStatus)
+		probe.OverrideCrashDumpParser(noCrashDumpParser)
+		err = crashCheck.Run()
+		assert.Nil(t, err)
+		mock.AssertNumberOfCalls(t, "Gauge", 0)
+		mock.AssertNumberOfCalls(t, "Rate", 0)
+		mock.AssertNumberOfCalls(t, "Event", 0)
+		mock.AssertNumberOfCalls(t, "Commit", 0)
+	})
+
+	t.Run("test failure on reading crash settings", func(t *testing.T) {
+		testSetup(t)
+		defer testCleanup()
+
+		check := newCheck()
+		crashCheck := check.(*WinCrashDetect)
+		mock := mocksender.NewMockSender(crashCheck.ID())
+		err := crashCheck.Configure(mock.GetSenderManager(), 0, nil, nil, "")
+		assert.NoError(t, err)
+
+		failedStatus := &probe.WinCrashStatus{
+			StatusCode: probe.WinCrashStatusCodeFailed,
+			ErrString:  "Mocked failure",
+		}
+
+		// Test having a failure reading settings. The response should be immediate.
+		cp.SetCachedSettings(failedStatus)
+		probe.OverrideCrashDumpParser(noCrashDumpParser)
+		err = crashCheck.Run()
+		assert.NotNil(t, err)
+		mock.AssertNumberOfCalls(t, "Rate", 0)
+		mock.AssertNumberOfCalls(t, "Event", 0)
+		mock.AssertNumberOfCalls(t, "Commit", 0)
+	})
+}
diff --git a/pkg/collector/python/test_util.go b/pkg/collector/python/test_util.go
index 2f4404694c939..02ffca4587210 100644
--- a/pkg/collector/python/test_util.go
+++ b/pkg/collector/python/test_util.go
@@ -59,7 +59,7 @@ func testGetSubprocessOutputUnknownBin(t *testing.T) {
 	assert.Equal(t, "", C.GoString(cStdout))
 	assert.Equal(t, "", C.GoString(cStderr))
 	assert.Equal(t, C.int(0), cRetCode)
-	assert.NotNil(t, exception)
+	assert.Nil(t, exception)
 }
 
 func testGetSubprocessOutputError(t *testing.T) {
diff --git a/pkg/collector/python/util.go b/pkg/collector/python/util.go
index 4d66ae653c8c2..8486cc7c74117 100644
--- a/pkg/collector/python/util.go
+++ b/pkg/collector/python/util.go
@@ -68,11 +68,7 @@ func GetSubprocessOutput(argv **C.char, env **C.char, cStdout **C.char, cStderr
 		outputErr, _ = io.ReadAll(stderr)
 	}()
 
-	err = cmd.Start()
-	if err != nil {
-		*exception = TrackedCString(fmt.Sprintf("internal error starting subprocess: %v", err))
-		return
-	}
+	cmd.Start() //nolint:errcheck
 
 	// Wait for the pipes to be closed *before* waiting for the cmd to exit, as per os.exec docs
 	wg.Wait()
diff --git a/pkg/config/model/viper.go b/pkg/config/model/viper.go
index ff66d1db00543..126c5fe782ad9 100644
--- a/pkg/config/model/viper.go
+++ b/pkg/config/model/viper.go
@@ -71,12 +71,31 @@ var sources = []Source{
 	SourceCLI,
 }
 
+// sourcesPriority gives each source a priority; the higher the value, the more important the source. This is used when merging
+// configuration trees (a higher priority overwrites a lower one).
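Concretely, with the priority table defined just below, `IsGreaterThan` (added a few lines after the table) lets merge code compare two sources directly. A small sketch under those values:

```go
// SourceRC (7) outranks SourceFile (2): a remote-config value wins over
// one read from the config file during a merge.
model.SourceRC.IsGreaterThan(model.SourceFile) // true
// Equal priorities never outrank each other.
model.SourceDefault.IsGreaterThan(model.SourceDefault) // false
```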
+var sourcesPriority = map[Source]int{ + SourceDefault: 0, + SourceUnknown: 1, + SourceFile: 2, + SourceEnvVar: 3, + SourceFleetPolicies: 4, + SourceAgentRuntime: 5, + SourceLocalConfigProcess: 6, + SourceRC: 7, + SourceCLI: 8, +} + // ValueWithSource is a tuple for a source and a value, not necessarily the applied value in the main config type ValueWithSource struct { Source Source Value interface{} } +// IsGreaterThan returns true if the current source is of higher priority than the one given as a parameter +func (s Source) IsGreaterThan(x Source) bool { + return sourcesPriority[s] > sourcesPriority[x] +} + // String casts Source into a string func (s Source) String() string { // Safeguard: if we don't know the Source, we assume SourceUnknown diff --git a/pkg/config/nodetreemodel/conversion.go b/pkg/config/nodetreemodel/conversion.go new file mode 100644 index 0000000000000..c167ff6c9e25a --- /dev/null +++ b/pkg/config/nodetreemodel/conversion.go @@ -0,0 +1,182 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package nodetreemodel + +import ( + "fmt" + "strconv" + "strings" +) + +func toString(v interface{}) (string, error) { + switch it := v.(type) { + case int, int8, int16, int32, int64: + num, err := toInt(v) + if err != nil { + return "", err + } + stringVal := strconv.FormatInt(int64(num), 10) + return stringVal, nil + case uint, uint8, uint16, uint32, uint64: + num, err := toInt(v) + if err != nil { + return "", err + } + stringVal := strconv.FormatUint(uint64(num), 10) + return stringVal, nil + case float32: + return strconv.FormatFloat(float64(it), 'f', -1, 32), nil + case float64: + return strconv.FormatFloat(it, 'f', -1, 64), nil + case string: + return it, nil + } + return "", newConversionError(v, "string") +} + +func toFloat(v interface{}) (float64, error) { + switch it := v.(type) { + case int: + return float64(it), nil + case int8: + return float64(it), nil + case int16: + return float64(it), nil + case int32: + return float64(it), nil + case int64: + return float64(it), nil + case uint: + return float64(it), nil + case uint8: + return float64(it), nil + case uint16: + return float64(it), nil + case uint32: + return float64(it), nil + case uint64: + return float64(it), nil + case float32: + return float64(it), nil + case float64: + return float64(it), nil + } + return 0, newConversionError(v, "float") +} + +func toInt(v interface{}) (int, error) { + switch it := v.(type) { + case int: + return int(it), nil + case int8: + return int(it), nil + case int16: + return int(it), nil + case int32: + return int(it), nil + case int64: + return int(it), nil + case uint: + return int(it), nil + case uint8: + return int(it), nil + case uint16: + return int(it), nil + case uint32: + return int(it), nil + case uint64: + return int(it), nil + case float32: + return int(it), nil + case float64: + return int(it), nil + } + return 0, newConversionError(v, "int") +} + +func toBool(v interface{}) (bool, error) { + switch it := v.(type) { + case bool: + return it, nil + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + num, err := toInt(v) + if err != nil { + return false, err + } + return num != 0, nil + case string: + return convertToBool(it) + default: + return false, newConversionError(v, "bool") + } +} + +func toStringSlice(v interface{}) ([]string, error) { 
//nolint: unused // TODO: fix + switch it := v.(type) { + case []string: + return it, nil + case []interface{}: + res := make([]string, len(it)) + for idx, item := range it { + sItem, err := toString(item) + if err != nil { + return nil, err + } + res[idx] = sItem + } + return res, nil + default: + return nil, newConversionError(v, "slice of string") + } +} + +func toFloatSlice(v interface{}) ([]float64, error) { //nolint: unused // TODO: fix + switch it := v.(type) { + case []float64: + return it, nil + case []interface{}: + res := make([]float64, len(it)) + for idx, item := range it { + sItem, err := toFloat(item) + if err != nil { + return nil, err + } + res[idx] = sItem + } + return res, nil + default: + return nil, newConversionError(v, "slice float64") + } +} + +// convert a string to a bool using standard yaml constants +func convertToBool(text string) (bool, error) { + lower := strings.ToLower(text) + if lower == "y" || lower == "yes" || lower == "on" || lower == "true" || lower == "1" { + return true, nil + } else if lower == "n" || lower == "no" || lower == "off" || lower == "false" || lower == "0" { + return false, nil + } + return false, newConversionError(text, "bool") +} + +func newConversionError(v interface{}, expectType string) error { + return fmt.Errorf("could not convert to %s: %v of type %T", expectType, v, v) +} + +func mapInterfaceToMapString(m map[interface{}]interface{}) map[string]interface{} { + res := make(map[string]interface{}, len(m)) + for k, v := range m { + mk := "" + if str, ok := k.(string); ok { + mk = str + } else { + mk = fmt.Sprintf("%s", k) + } + res[mk] = v + } + return res +} diff --git a/pkg/config/nodetreemodel/leaf.go b/pkg/config/nodetreemodel/leaf.go new file mode 100644 index 0000000000000..a3a4290c8ca68 --- /dev/null +++ b/pkg/config/nodetreemodel/leaf.go @@ -0,0 +1,137 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
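For reference, `convertToBool` above accepts the usual YAML boolean aliases, case-insensitively. A quick usage sketch (hypothetical values; the error text comes from `newConversionError`):

```go
v, err := convertToBool("Yes")  // v == true, err == nil
v, err = convertToBool("off")   // v == false, err == nil
v, err = convertToBool("maybe") // err: could not convert to bool: maybe of type string
fmt.Println(v, err)
```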
+ +package nodetreemodel + +import ( + "fmt" + "time" + + "github.com/DataDog/datadog-agent/pkg/config/model" +) + +// ArrayNode represents a node with ordered, numerically indexed set of children +type ArrayNode interface { + Size() int + Index(int) (Node, error) +} + +type arrayNodeImpl struct { + nodes []Node +} + +func newArrayNodeImpl(v []interface{}, source model.Source) (Node, error) { + nodes := make([]Node, 0, len(v)) + for _, it := range v { + if n, ok := it.(Node); ok { + nodes = append(nodes, n) + continue + } + n, err := NewNode(it, source) + if err != nil { + return nil, err + } + nodes = append(nodes, n) + } + return &arrayNodeImpl{nodes: nodes}, nil +} + +// GetChild returns an error because array node does not have children accessible by name +func (n *arrayNodeImpl) GetChild(string) (Node, error) { + return nil, fmt.Errorf("arrayNodeImpl.GetChild not implemented") +} + +// ChildrenKeys returns an error because array node does not have children accessible by name +func (n *arrayNodeImpl) ChildrenKeys() ([]string, error) { + return nil, fmt.Errorf("arrayNodeImpl.ChildrenKeys not implemented") +} + +// Size returns number of children in the list +func (n *arrayNodeImpl) Size() int { + return len(n.nodes) +} + +// Index returns the kth element of the list +func (n *arrayNodeImpl) Index(k int) (Node, error) { + if k < 0 || k >= len(n.nodes) { + return nil, ErrNotFound + } + return n.nodes[k], nil +} + +var _ ArrayNode = (*arrayNodeImpl)(nil) +var _ Node = (*arrayNodeImpl)(nil) + +// leafNode represents a leaf with a scalar value + +type leafNodeImpl struct { + // val must be a scalar kind + val interface{} + source model.Source +} + +func newLeafNodeImpl(v interface{}, source model.Source) (Node, error) { + if isScalar(v) { + return &leafNodeImpl{val: v, source: source}, nil + } + return nil, fmt.Errorf("cannot create leaf node from %v of type %T", v, v) +} + +var _ LeafNode = (*leafNodeImpl)(nil) +var _ Node = (*leafNodeImpl)(nil) + +// GetChild returns an error because a leaf has no children +func (n *leafNodeImpl) GetChild(key string) (Node, error) { + return nil, fmt.Errorf("can't GetChild(%s) of a leaf node", key) +} + +// ChildrenKeys returns an error because a leaf has no children +func (n *leafNodeImpl) ChildrenKeys() ([]string, error) { + return nil, fmt.Errorf("can't get ChildrenKeys of a leaf node") +} + +// GetAny returns the scalar as an interface +func (n *leafNodeImpl) GetAny() (interface{}, error) { + return n, nil +} + +// GetBool returns the scalar as a bool, or an error otherwise +func (n *leafNodeImpl) GetBool() (bool, error) { + return toBool(n.val) +} + +// GetInt returns the scalar as a int, or an error otherwise +func (n *leafNodeImpl) GetInt() (int, error) { + return toInt(n.val) +} + +// GetFloat returns the scalar as a float64, or an error otherwise +func (n *leafNodeImpl) GetFloat() (float64, error) { + return toFloat(n.val) +} + +// GetString returns the scalar as a string, or an error otherwise +func (n *leafNodeImpl) GetString() (string, error) { + return toString(n.val) +} + +// GetTime returns the scalar as a time, or an error otherwise, not implemented +func (n *leafNodeImpl) GetTime() (time.Time, error) { + return time.Time{}, fmt.Errorf("not implemented") +} + +// GetDuration returns the scalar as a duration, or an error otherwise, not implemented +func (n *leafNodeImpl) GetDuration() (time.Duration, error) { + return time.Duration(0), fmt.Errorf("not implemented") +} + +// Set assigns a value in the config, for the given source +func (n 
*leafNodeImpl) SetWithSource(newValue interface{}, source model.Source) error { + // TODO: enforce type-checking, return an error if type changes + n.val = newValue + n.source = source + // TODO: Record previous value and source + return nil +} diff --git a/pkg/config/nodetreemodel/node.go b/pkg/config/nodetreemodel/node.go index 3687bf24bc1fc..e13580041fcbe 100644 --- a/pkg/config/nodetreemodel/node.go +++ b/pkg/config/nodetreemodel/node.go @@ -8,7 +8,6 @@ package nodetreemodel import ( "fmt" "slices" - "strconv" "strings" "time" @@ -20,17 +19,17 @@ import ( var ErrNotFound = fmt.Errorf("not found") // NewNode constructs a Node from either a map, a slice, or a scalar value -func NewNode(v interface{}) (Node, error) { +func NewNode(v interface{}, source model.Source) (Node, error) { switch it := v.(type) { case map[interface{}]interface{}: - return newMapNodeImpl(mapInterfaceToMapString(it)) + return newMapNodeImpl(mapInterfaceToMapString(it), source) case map[string]interface{}: - return newMapNodeImpl(it) + return newMapNodeImpl(it, source) case []interface{}: - return newArrayNodeImpl(it) + return newArrayNodeImpl(it, source) } if isScalar(v) { - return newLeafNodeImpl(v) + return newLeafNodeImpl(v, source) } // Finally, try determining node type using reflection, should only be needed for unit tests that // supply data that isn't one of the "plain" types produced by parsing json, yaml, etc @@ -41,6 +40,12 @@ func NewNode(v interface{}) (Node, error) { return node, err } +// Node represents an arbitrary node +type Node interface { + GetChild(string) (Node, error) + ChildrenKeys() ([]string, error) +} + // LeafNode represents a leaf node of the config type LeafNode interface { GetAny() (interface{}, error) @@ -53,75 +58,27 @@ type LeafNode interface { SetWithSource(interface{}, model.Source) error } -// ArrayNode represents a node with ordered, numerically indexed set of children -type ArrayNode interface { - Size() int - Index(int) (Node, error) -} - -// Node represents an arbitrary node -type Node interface { - GetChild(string) (Node, error) - ChildrenKeys() ([]string, error) -} - -// leafNode represents a leaf with a scalar value - -type leafNodeImpl struct { - // val must be a scalar kind - val interface{} - source model.Source -} - -func newLeafNodeImpl(v interface{}) (Node, error) { - if isScalar(v) { - return &leafNodeImpl{val: v}, nil - } - return nil, fmt.Errorf("cannot create leaf node from %v of type %T", v, v) -} - -var _ LeafNode = (*leafNodeImpl)(nil) -var _ Node = (*leafNodeImpl)(nil) - -// arrayNode represents a node with an ordered array of children - -type arrayNodeImpl struct { - nodes []Node +// innerNode represents an non-leaf node of the config +type innerNode struct { + val map[string]Node + // remapCase maps each lower-case key to the original case. 
This + // enables GetChild to retrieve values using case-insensitive keys + remapCase map[string]string } -func newArrayNodeImpl(v []interface{}) (Node, error) { - nodes := make([]Node, 0, len(v)) - for _, it := range v { - if n, ok := it.(Node); ok { - nodes = append(nodes, n) - continue - } - n, err := NewNode(it) +func newMapNodeImpl(v map[string]interface{}, source model.Source) (*innerNode, error) { + children := map[string]Node{} + for name, value := range v { + n, err := NewNode(value, source) if err != nil { return nil, err } - nodes = append(nodes, n) + children[name] = n } - return &arrayNodeImpl{nodes: nodes}, nil + return &innerNode{val: children, remapCase: makeRemapCase(children)}, nil } -var _ ArrayNode = (*arrayNodeImpl)(nil) -var _ Node = (*arrayNodeImpl)(nil) - -// node represents an arbitrary node of the tree - -type mapNodeImpl struct { - val map[string]interface{} - // remapCase maps each lower-case key to the original case. This - // enables GetChild to retrieve values using case-insensitive keys - remapCase map[string]string -} - -func newMapNodeImpl(v map[string]interface{}) (Node, error) { - return &mapNodeImpl{val: v, remapCase: makeRemapCase(v)}, nil -} - -var _ Node = (*mapNodeImpl)(nil) +var _ Node = (*innerNode)(nil) /////// @@ -139,7 +96,7 @@ func isScalar(v interface{}) bool { } // creates a map that converts keys from their lower-cased version to their original case -func makeRemapCase(m map[string]interface{}) map[string]string { +func makeRemapCase(m map[string]Node) map[string]string { remap := make(map[string]string) for k := range m { remap[strings.ToLower(k)] = k @@ -147,230 +104,77 @@ func makeRemapCase(m map[string]interface{}) map[string]string { return remap } -func mapInterfaceToMapString(m map[interface{}]interface{}) map[string]interface{} { - res := make(map[string]interface{}, len(m)) - for k, v := range m { - mk := "" - if str, ok := k.(string); ok { - mk = str - } else { - mk = fmt.Sprintf("%s", k) - } - res[mk] = v - } - return res -} - ///// // GetChild returns the child node at the given case-insensitive key, or an error if not found -func (n *mapNodeImpl) GetChild(key string) (Node, error) { +func (n *innerNode) GetChild(key string) (Node, error) { mkey := n.remapCase[strings.ToLower(key)] child, found := n.val[mkey] if !found { return nil, ErrNotFound } - // If the map is already storing a Node, return it - if n, ok := child.(Node); ok { - return n, nil - } - // Otherwise construct a new node - return NewNode(child) -} - -// ChildrenKeys returns the list of keys of the children of the given node, if it is a map -func (n *mapNodeImpl) ChildrenKeys() ([]string, error) { - mapkeys := maps.Keys(n.val) - // map keys are iterated non-deterministically, sort them - slices.Sort(mapkeys) - return mapkeys, nil -} - -// GetChild returns an error because array node does not have children accessible by name -func (n *arrayNodeImpl) GetChild(string) (Node, error) { - return nil, fmt.Errorf("arrayNodeImpl.GetChild not implemented") -} - -// ChildrenKeys returns an error because array node does not have children accessible by name -func (n *arrayNodeImpl) ChildrenKeys() ([]string, error) { - return nil, fmt.Errorf("arrayNodeImpl.ChildrenKeys not implemented") -} - -// Size returns number of children in the list -func (n *arrayNodeImpl) Size() int { - return len(n.nodes) -} - -// Index returns the kth element of the list -func (n *arrayNodeImpl) Index(k int) (Node, error) { - if k < 0 || k >= len(n.nodes) { - return nil, ErrNotFound - } - return 
n.nodes[k], nil -} - -// GetChild returns an error because a leaf has no children -func (n *leafNodeImpl) GetChild(key string) (Node, error) { - return nil, fmt.Errorf("can't GetChild(%s) of a leaf node", key) -} - -// ChildrenKeys returns an error because a leaf has no children -func (n *leafNodeImpl) ChildrenKeys() ([]string, error) { - return nil, fmt.Errorf("can't get ChildrenKeys of a leaf node") + return child, nil } -// GetAny returns the scalar as an interface -func (n *leafNodeImpl) GetAny() (interface{}, error) { - return n, nil -} - -// GetBool returns the scalar as a bool, or an error otherwise -func (n *leafNodeImpl) GetBool() (bool, error) { - switch it := n.val.(type) { - case bool: - return it, nil - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: - num, err := n.GetInt() - if err != nil { - return false, err - } - return num != 0, nil - case string: - return convertToBool(it) - default: - return false, newConversionError(n, "bool") - } -} - -// GetInt returns the scalar as a int, or an error otherwise -func (n *leafNodeImpl) GetInt() (int, error) { - switch it := n.val.(type) { - case int: - return int(it), nil - case int8: - return int(it), nil - case int16: - return int(it), nil - case int32: - return int(it), nil - case int64: - return int(it), nil - case uint: - return int(it), nil - case uint8: - return int(it), nil - case uint16: - return int(it), nil - case uint32: - return int(it), nil - case uint64: - return int(it), nil - case float32: - return int(it), nil - case float64: - return int(it), nil - } - return 0, newConversionError(n.val, "int") -} - -// GetFloat returns the scalar as a float64, or an error otherwise -func (n *leafNodeImpl) GetFloat() (float64, error) { - switch it := n.val.(type) { - case int: - return float64(it), nil - case int8: - return float64(it), nil - case int16: - return float64(it), nil - case int32: - return float64(it), nil - case int64: - return float64(it), nil - case uint: - return float64(it), nil - case uint8: - return float64(it), nil - case uint16: - return float64(it), nil - case uint32: - return float64(it), nil - case uint64: - return float64(it), nil - case float32: - return float64(it), nil - case float64: - return float64(it), nil +// Merge mergs src node within current tree +func (n *innerNode) Merge(srcNode Node) error { + src, ok := srcNode.(*innerNode) + if !ok { + return fmt.Errorf("can't merge leaf into a node") } - return 0, newConversionError(n.val, "float") -} -// GetString returns the scalar as a string, or an error otherwise -func (n *leafNodeImpl) GetString() (string, error) { - switch it := n.val.(type) { - case int, int8, int16, int32, int64: - num, err := n.GetInt() - if err != nil { - return "", err - } - stringVal := strconv.FormatInt(int64(num), 10) - return stringVal, nil - case uint, uint8, uint16, uint32, uint64: - num, err := n.GetInt() - if err != nil { - return "", err - } - stringVal := strconv.FormatUint(uint64(num), 10) - return stringVal, nil - case float32: - f, err := n.GetFloat() - if err != nil { - return "", err - } - stringVal := strconv.FormatFloat(f, 'f', -1, 32) - return stringVal, nil - case float64: - f, err := n.GetFloat() - if err != nil { - return "", err + childrenNames := maps.Keys(src.val) + // map keys are iterated non-deterministically, sort them + slices.Sort(childrenNames) + + for _, name := range childrenNames { + child := src.val[name] + srcLeaf, srcIsLeaf := child.(*leafNodeImpl) + + if _, ok := n.val[name]; !ok { + // child from src is unknown, 
diff --git a/pkg/config/nodetreemodel/node_test.go b/pkg/config/nodetreemodel/node_test.go index 7cf34c9ceb1be..450464f3eb525 100644 --- a/pkg/config/nodetreemodel/node_test.go +++ b/pkg/config/nodetreemodel/node_test.go @@ -8,7 +8,9 @@ package nodetreemodel import ( "testing" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewNodeAndNodeMethods(t *testing.T) { @@ -21,7 +23,7 @@ func TestNewNodeAndNodeMethods(t *testing.T) { }, } - n, err := NewNode(obj) + n, err := NewNode(obj, model.SourceDefault) assert.NoError(t, err) keys, err := n.ChildrenKeys() @@ -64,3 +66,141 @@ func TestNewNodeAndNodeMethods(t *testing.T) { _, err = third.GetChild("e") assert.NoError(t, err) } + +func TestMergeToEmpty(t *testing.T) { + obj := map[string]interface{}{ + "a": "apple", + "b": 123, + "c": map[string]interface{}{ + "d": true, + "e": map[string]interface{}{ + "f": 456, + }, + }, + } + + src, err := NewNode(obj, model.SourceFile) + require.NoError(t, err) + + dst, err := newMapNodeImpl(nil, model.SourceDefault) + require.NoError(t, err) + + err = dst.Merge(src) + require.NoError(t, err) + + expected := &innerNode{ + remapCase: map[string]string{"a": "a", "b": "b", "c": "c"}, + val: map[string]Node{ + "a": &leafNodeImpl{val: "apple", source: model.SourceFile}, + "b": &leafNodeImpl{val: 123, source: model.SourceFile}, + "c": &innerNode{ + remapCase: map[string]string{"d": "d", "e": "e"}, + val: map[string]Node{ + "d": &leafNodeImpl{val: true, source: model.SourceFile}, + "e": &innerNode{ + remapCase: map[string]string{"f": "f"}, + val: map[string]Node{ + "f": &leafNodeImpl{val: 456, source: model.SourceFile}, + }, + }, + }, + }, + }, + } + assert.Equal(t, expected, dst) +} + +func TestMergeTwoTree(t *testing.T) { + obj := map[string]interface{}{ + "a": "apple", + "b": 123, + "c": map[string]interface{}{ + "d": true, + "e": map[string]interface{}{ + "f": 456, + }, + }, + } + + obj2 := map[string]interface{}{ + "a": "orange", + "z": 987, + "c": map[string]interface{}{ + "d": false, + "e": map[string]interface{}{ + "f": 456, + "g": "kiwi", + }, + }, + } + + base, err := NewNode(obj, model.SourceFile) + require.NoError(t, err) + + overwrite, err := NewNode(obj2, model.SourceEnvVar) + require.NoError(t, err) + + err = base.(*innerNode).Merge(overwrite) + require.NoError(t, err) + + expected := &innerNode{ + remapCase: map[string]string{"a": "a", "b": "b", "z": "z", "c": "c"}, + val: map[string]Node{ + "a": &leafNodeImpl{val: "orange", source: model.SourceEnvVar}, + "b": &leafNodeImpl{val: 123, source: model.SourceFile}, + "z": &leafNodeImpl{val: 987, source: model.SourceEnvVar}, + "c": &innerNode{ + remapCase: map[string]string{"d": "d", "e": "e"}, + val: map[string]Node{ + "d": &leafNodeImpl{val: false, source: model.SourceEnvVar}, + "e": &innerNode{ + remapCase: map[string]string{"f": "f", "g": "g"}, + val: map[string]Node{ + "f": &leafNodeImpl{val: 456, source: model.SourceEnvVar}, + "g": &leafNodeImpl{val: "kiwi", source: model.SourceEnvVar}, + }, + }, + }, + }, + }, + } + assert.Equal(t, expected, base) +} + +func TestMergeErrorLeafToNode(t *testing.T) { + obj := map[string]interface{}{ + "a": "apple", + } + + obj2 := map[string]interface{}{ + "a": map[string]interface{}{}, + } + + base, err := NewNode(obj, model.SourceFile) + require.NoError(t, err) + + overwrite, err := NewNode(obj2, model.SourceEnvVar) + require.NoError(t, err) + + // checking leaf to node + err = base.(*innerNode).Merge(overwrite) + require.Error(t, err) + assert.Equal(t, "tree conflict, can't merge leaf and non-leaf nodes for 'a'", err.Error()) + + // checking node to leaf + err = overwrite.(*innerNode).Merge(base) + require.Error(t, err) + assert.Equal(t, "tree conflict, can't merge leaf and non-leaf nodes for 'a'", err.Error()) +} + +func TestMergeErrorLeaf(t *testing.T) { + base, err := newMapNodeImpl(nil, model.SourceDefault) + require.NoError(t, err) + + leaf, err := newLeafNodeImpl(123, model.SourceDefault) + require.NoError(t, err) + + err = base.Merge(leaf) + require.Error(t, err) + assert.Equal(t, "can't merge leaf into a node", err.Error()) +}
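Because `remapCase` is rebuilt after every merge, lookups stay case-insensitive no matter which source a child came from. A hedged in-package sketch of the lookup path (`exampleLookup` is an illustrative name, not part of the diff):

```go
// Hypothetical in-package sketch of the case-insensitive lookup path.
func exampleLookup() (Node, error) {
	n, err := NewNode(map[string]interface{}{"LogLevel": "debug"}, model.SourceDefault)
	if err != nil {
		return nil, err
	}
	// remapCase["loglevel"] == "LogLevel", so any casing finds the same leaf.
	return n.GetChild("LOGLEVEL")
}
```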
diff --git a/pkg/config/nodetreemodel/read_config_file.go b/pkg/config/nodetreemodel/read_config_file.go index 3cc574472453c..eb246958b621b 100644 --- a/pkg/config/nodetreemodel/read_config_file.go +++ b/pkg/config/nodetreemodel/read_config_file.go @@ -9,6 +9,7 @@ import ( "io" "os" + "github.com/DataDog/datadog-agent/pkg/config/model"
"gopkg.in/yaml.v2" ) @@ -69,7 +70,7 @@ func (c *ntmConfig) readConfigurationContent(content []byte) (Node, error) { if err := yaml.Unmarshal(content, &obj); err != nil { return nil, err } - root, err := NewNode(obj) + root, err := NewNode(obj, model.SourceFile) if err != nil { return nil, err } diff --git a/pkg/config/nodetreemodel/reflection_node.go b/pkg/config/nodetreemodel/reflection_node.go index 41abc6d3c9a3a..5d359f2d1b505 100644 --- a/pkg/config/nodetreemodel/reflection_node.go +++ b/pkg/config/nodetreemodel/reflection_node.go @@ -11,6 +11,8 @@ import ( "strings" "unicode" "unicode/utf8" + + "github.com/DataDog/datadog-agent/pkg/config/model" ) var ( @@ -34,13 +36,13 @@ func asReflectionNode(v interface{}) (Node, error) { } else if rv.Kind() == reflect.Slice { elems := make([]interface{}, 0, rv.Len()) for i := 0; i < rv.Len(); i++ { - node, err := NewNode(rv.Index(i).Interface()) + node, err := NewNode(rv.Index(i).Interface(), model.SourceDefault) if err != nil { return nil, err } elems = append(elems, node) } - return newArrayNodeImpl(elems) + return newArrayNodeImpl(elems, model.SourceDefault) } else if rv.Kind() == reflect.Map { res := make(map[string]interface{}, rv.Len()) mapkeys := rv.MapKeys() @@ -53,7 +55,7 @@ func asReflectionNode(v interface{}) (Node, error) { } res[kstr] = rv.MapIndex(mk).Interface() } - return newMapNodeImpl(res) + return newMapNodeImpl(res, model.SourceDefault) } return nil, errUnknownConversion } @@ -73,7 +75,7 @@ func (n *structNodeImpl) GetChild(key string) (Node, error) { if inner.Kind() == reflect.Interface { inner = inner.Elem() } - return NewNode(inner.Interface()) + return NewNode(inner.Interface(), model.SourceDefault) } // ChildrenKeys returns the list of keys of the children of the given node, if it is a map diff --git a/pkg/config/nodetreemodel/reflection_node_test.go b/pkg/config/nodetreemodel/reflection_node_test.go index 5a6b69d286fca..6fa8746684d70 100644 --- a/pkg/config/nodetreemodel/reflection_node_test.go +++ b/pkg/config/nodetreemodel/reflection_node_test.go @@ -8,6 +8,7 @@ package nodetreemodel import ( "testing" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/stretchr/testify/assert" ) @@ -20,7 +21,7 @@ func TestNewReflectionNode(t *testing.T) { n, err := NewNode(Object{ Name: "test", Num: 7, - }) + }, model.SourceDefault) assert.NoError(t, err) keys, err := n.ChildrenKeys() diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index c805ca0e2a58a..78f93cd122c6a 100644 --- a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -537,6 +537,7 @@ func InitConfig(config pkgconfigmodel.Setup) { // - nodes config.BindEnvAndSetDefault("cluster_agent.kube_metadata_collection.resources", []string{}) config.BindEnvAndSetDefault("cluster_agent.kube_metadata_collection.resource_annotations_exclude", []string{}) + config.BindEnvAndSetDefault("cluster_agent.cluster_tagger.grpc_max_message_size", 4<<20) // 4 MB // Metadata endpoints diff --git a/pkg/config/setup/system_probe.go b/pkg/config/setup/system_probe.go index 66762a7d98fae..d8ef99639d574 100644 --- a/pkg/config/setup/system_probe.go +++ b/pkg/config/setup/system_probe.go @@ -31,7 +31,7 @@ const ( pngNS = "ping" tracerouteNS = "traceroute" discoveryNS = "discovery" - gpuMonitoringNS = "gpu_monitoring" + gpuNS = "gpu_monitoring" defaultConnsMessageBatchSize = 600 // defaultServiceMonitoringJavaAgentArgs is default arguments that are passing to the injected java USM agent @@ -256,7 +256,6 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { 
cfg.BindEnvAndSetDefault(join(smjtNS, "allow_regex"), "") cfg.BindEnvAndSetDefault(join(smjtNS, "block_regex"), "") cfg.BindEnvAndSetDefault(join(smjtNS, "dir"), defaultSystemProbeJavaDir) - cfg.BindEnvAndSetDefault(join(smNS, "enable_http_stats_by_status_code"), true) cfg.BindEnvAndSetDefault(join(netNS, "enable_gateway_lookup"), true, "DD_SYSTEM_PROBE_NETWORK_ENABLE_GATEWAY_LOOKUP") // Default value (100000) is set in `adjustUSM`, to avoid having "deprecation warning", due to the default value. @@ -408,7 +407,10 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { cfg.BindEnv("fleet_policies_dir") // GPU monitoring - cfg.BindEnvAndSetDefault(join(gpuMonitoringNS, "enabled"), false) + cfg.BindEnvAndSetDefault(join(gpuNS, "enabled"), false) + cfg.BindEnv(join(gpuNS, "nvml_lib_path")) + cfg.BindEnvAndSetDefault(join(gpuNS, "process_scan_interval_seconds"), 5) + cfg.BindEnvAndSetDefault(join(gpuNS, "initial_process_sync"), true) initCWSSystemProbeConfig(cfg) } diff --git a/pkg/config/structure/unmarshal.go b/pkg/config/structure/unmarshal.go index e59e59f8850c7..2f5596ecddea5 100644 --- a/pkg/config/structure/unmarshal.go +++ b/pkg/config/structure/unmarshal.go @@ -51,7 +51,7 @@ func UnmarshalKey(cfg model.Reader, key string, target interface{}, opts ...Unma if rawval == nil { return nil } - source, err := nodetreemodel.NewNode(rawval) + source, err := nodetreemodel.NewNode(rawval, cfg.GetSource(key)) if err != nil { return err } diff --git a/pkg/config/structure/unmarshal_test.go b/pkg/config/structure/unmarshal_test.go index 61f7811ba7057..35b1da29c3532 100644 --- a/pkg/config/structure/unmarshal_test.go +++ b/pkg/config/structure/unmarshal_test.go @@ -12,6 +12,7 @@ import ( "testing" "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/config/nodetreemodel" "github.com/stretchr/testify/assert" ) @@ -1260,7 +1261,7 @@ service: func TestMapGetChildNotFound(t *testing.T) { m := map[string]interface{}{"a": "apple", "b": "banana"} - n, err := nodetreemodel.NewNode(m) + n, err := nodetreemodel.NewNode(m, model.SourceDefault) assert.NoError(t, err) val, err := n.GetChild("a") diff --git a/pkg/dynamicinstrumentation/ditypes/config.go b/pkg/dynamicinstrumentation/ditypes/config.go index 6a7c7e08f7e83..ba9667e069552 100644 --- a/pkg/dynamicinstrumentation/ditypes/config.go +++ b/pkg/dynamicinstrumentation/ditypes/config.go @@ -285,7 +285,7 @@ type Probe struct { // GetBPFFuncName cleans the function name to be allowed by the bpf compiler func (p *Probe) GetBPFFuncName() string { // can't have '.', '-' or '/' in bpf program name - replacer := strings.NewReplacer(".", "_", "/", "_", "-", "_", "[", "_", "]", "_") + replacer := strings.NewReplacer(".", "_", "/", "_", "-", "_", "[", "_", "]", "_", "*", "ptr_", "(", "", ")", "") return replacer.Replace(p.FuncName) } diff --git a/pkg/dynamicinstrumentation/testutil/fixtures.go b/pkg/dynamicinstrumentation/testutil/fixtures.go index 575f379343518..f5bfff1222f4c 100644 --- a/pkg/dynamicinstrumentation/testutil/fixtures.go +++ b/pkg/dynamicinstrumentation/testutil/fixtures.go @@ -167,6 +167,21 @@ var structCaptures = fixtures{ "b": capturedValue("string", "bb"), "c": capturedValue("string", "ccc"), }}}, + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.receiver.test_method_receiver": { + "r": { + Type: "struct", Fields: fieldMap{ + "u": capturedValue("uint", "1"), + }}, + "a": capturedValue("int", "2"), + }, + // TODO: re-enable when 
fixing pointer method receivers + // "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.(*receiver).test_pointer_method_receiver": { + // "r": { + // Type: "struct", Fields: fieldMap{ + // "u": capturedValue("uint", "3"), + // }}, + // "a": capturedValue("int", "4"), + // }, // "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_lots_of_fields": {"l": {Type: "struct", Fields: fieldMap{ // "a": capturedValue("uint8", "1"), // "b": capturedValue("uint8", "2"), diff --git a/pkg/dynamicinstrumentation/testutil/sample/other.go b/pkg/dynamicinstrumentation/testutil/sample/other.go index edb9a4f893123..9535163c9dae2 100644 --- a/pkg/dynamicinstrumentation/testutil/sample/other.go +++ b/pkg/dynamicinstrumentation/testutil/sample/other.go @@ -13,6 +13,10 @@ import ( type triggerVerifierErrorForTesting byte +//nolint:all +//go:noinline +func test_channel(c chan bool) {} + //nolint:all //go:noinline func test_trigger_verifier_error(t triggerVerifierErrorForTesting) {} @@ -33,5 +37,8 @@ func Return_goroutine_id() uint64 { //nolint:all //go:noinline func ExecuteOther() { + x := make(chan bool) + test_channel(x) + test_trigger_verifier_error(1) } diff --git a/pkg/dynamicinstrumentation/testutil/sample/structs.go b/pkg/dynamicinstrumentation/testutil/sample/structs.go index 2e1d6d38aa836..ff57ffba8d1c3 100644 --- a/pkg/dynamicinstrumentation/testutil/sample/structs.go +++ b/pkg/dynamicinstrumentation/testutil/sample/structs.go @@ -5,6 +5,18 @@ package sample +type receiver struct { + u uint +} + +//nolint:all +//go:noinline +func (r *receiver) test_pointer_method_receiver(a int) {} + +//nolint:all +//go:noinline +func (r receiver) test_method_receiver(a int) {} + //nolint:all //go:noinline func test_struct_with_array(a structWithAnArray) {} @@ -114,6 +126,13 @@ func ExecuteStructFuncs() { fields := lotsOfFields{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26} test_lots_of_fields(fields) + + rcvr := receiver{1} + rcvr.test_method_receiver(2) + + ptrRcvr := &receiver{3} + ptrRcvr.test_pointer_method_receiver(4) + } type emptyStruct struct{} diff --git a/pkg/ebpf/config.go b/pkg/ebpf/config.go index 3bcea9ba21de2..1f27d5ff7525a 100644 --- a/pkg/ebpf/config.go +++ b/pkg/ebpf/config.go @@ -6,8 +6,6 @@ package ebpf import ( - "strings" - sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kernel" @@ -81,38 +79,34 @@ type Config struct { BypassEnabled bool } -func key(pieces ...string) string { - return strings.Join(pieces, ".") -} - // NewConfig creates a config with ebpf-related settings func NewConfig() *Config { cfg := pkgconfigsetup.SystemProbe() sysconfig.Adjust(cfg) c := &Config{ - BPFDebug: cfg.GetBool(key(spNS, "bpf_debug")), - BPFDir: cfg.GetString(key(spNS, "bpf_dir")), - ExcludedBPFLinuxVersions: cfg.GetStringSlice(key(spNS, "excluded_linux_versions")), - EnableTracepoints: cfg.GetBool(key(spNS, "enable_tracepoints")), + BPFDebug: cfg.GetBool(sysconfig.FullKeyPath(spNS, "bpf_debug")), + BPFDir: cfg.GetString(sysconfig.FullKeyPath(spNS, "bpf_dir")), + ExcludedBPFLinuxVersions: cfg.GetStringSlice(sysconfig.FullKeyPath(spNS, "excluded_linux_versions")), + EnableTracepoints: cfg.GetBool(sysconfig.FullKeyPath(spNS, "enable_tracepoints")), ProcRoot: kernel.ProcFSRoot(), - InternalTelemetryEnabled: cfg.GetBool(key(spNS, "telemetry_enabled")), - - EnableCORE: cfg.GetBool(key(spNS, 
"enable_co_re")), - BTFPath: cfg.GetString(key(spNS, "btf_path")), - - EnableRuntimeCompiler: cfg.GetBool(key(spNS, "enable_runtime_compiler")), - RuntimeCompilerOutputDir: cfg.GetString(key(spNS, "runtime_compiler_output_dir")), - EnableKernelHeaderDownload: cfg.GetBool(key(spNS, "enable_kernel_header_download")), - KernelHeadersDirs: cfg.GetStringSlice(key(spNS, "kernel_header_dirs")), - KernelHeadersDownloadDir: cfg.GetString(key(spNS, "kernel_header_download_dir")), - AptConfigDir: cfg.GetString(key(spNS, "apt_config_dir")), - YumReposDir: cfg.GetString(key(spNS, "yum_repos_dir")), - ZypperReposDir: cfg.GetString(key(spNS, "zypper_repos_dir")), - AllowPrecompiledFallback: cfg.GetBool(key(spNS, "allow_precompiled_fallback")), - AllowRuntimeCompiledFallback: cfg.GetBool(key(spNS, "allow_runtime_compiled_fallback")), - - AttachKprobesWithKprobeEventsABI: cfg.GetBool(key(spNS, "attach_kprobes_with_kprobe_events_abi")), + InternalTelemetryEnabled: cfg.GetBool(sysconfig.FullKeyPath(spNS, "telemetry_enabled")), + + EnableCORE: cfg.GetBool(sysconfig.FullKeyPath(spNS, "enable_co_re")), + BTFPath: cfg.GetString(sysconfig.FullKeyPath(spNS, "btf_path")), + + EnableRuntimeCompiler: cfg.GetBool(sysconfig.FullKeyPath(spNS, "enable_runtime_compiler")), + RuntimeCompilerOutputDir: cfg.GetString(sysconfig.FullKeyPath(spNS, "runtime_compiler_output_dir")), + EnableKernelHeaderDownload: cfg.GetBool(sysconfig.FullKeyPath(spNS, "enable_kernel_header_download")), + KernelHeadersDirs: cfg.GetStringSlice(sysconfig.FullKeyPath(spNS, "kernel_header_dirs")), + KernelHeadersDownloadDir: cfg.GetString(sysconfig.FullKeyPath(spNS, "kernel_header_download_dir")), + AptConfigDir: cfg.GetString(sysconfig.FullKeyPath(spNS, "apt_config_dir")), + YumReposDir: cfg.GetString(sysconfig.FullKeyPath(spNS, "yum_repos_dir")), + ZypperReposDir: cfg.GetString(sysconfig.FullKeyPath(spNS, "zypper_repos_dir")), + AllowPrecompiledFallback: cfg.GetBool(sysconfig.FullKeyPath(spNS, "allow_precompiled_fallback")), + AllowRuntimeCompiledFallback: cfg.GetBool(sysconfig.FullKeyPath(spNS, "allow_runtime_compiled_fallback")), + + AttachKprobesWithKprobeEventsABI: cfg.GetBool(sysconfig.FullKeyPath(spNS, "attach_kprobes_with_kprobe_events_abi")), } return c diff --git a/pkg/ebpf/helper_call_patcher.go b/pkg/ebpf/helper_call_patcher.go index 1408c0fb887ac..0a623276a75ff 100644 --- a/pkg/ebpf/helper_call_patcher.go +++ b/pkg/ebpf/helper_call_patcher.go @@ -14,16 +14,12 @@ import ( "github.com/cilium/ebpf/asm" ) -// noopIns is used in place of the eBPF helpers we wish to remove from +// replaceIns is used in place of the eBPF helpers we wish to remove from // the bytecode. // -// note we're using here the same noop instruction used internally by the -// verifier: -// https://elixir.bootlin.com/linux/v6.7/source/kernel/bpf/verifier.c#L18582 -var noopIns = asm.Instruction{ - OpCode: asm.Ja.Op(asm.ImmSource), - Constant: 0, -} +// Helper calls clobber r1-r5 and the return value is expected in r0. +// We are replacing with `r0 = 0` so code which checks the return value works as expected. 
+var replaceIns = asm.Mov.Imm(asm.R0, 0) // NewHelperCallRemover provides a `Modifier` that patches eBPF bytecode // such that calls to the functions given by `helpers` are replaced by @@ -70,7 +66,7 @@ func (h *helperCallRemover) BeforeInit(m *manager.Manager, _ *manager.Options) e for _, fn := range h.helpers { if ins.Constant == int64(fn) { - *ins = noopIns.WithMetadata(ins.Metadata) + *ins = replaceIns.WithMetadata(ins.Metadata) break } } diff --git a/pkg/eventmonitor/config/config.go b/pkg/eventmonitor/config/config.go index 785b1ffd968ef..04248f6460e77 100644 --- a/pkg/eventmonitor/config/config.go +++ b/pkg/eventmonitor/config/config.go @@ -7,8 +7,7 @@ package config import ( - "strings" - + sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -34,21 +33,17 @@ type Config struct { func NewConfig() *Config { return &Config{ // event server - SocketPath: pkgconfigsetup.SystemProbe().GetString(join(evNS, "socket")), - EventServerBurst: pkgconfigsetup.SystemProbe().GetInt(join(evNS, "event_server.burst")), + SocketPath: pkgconfigsetup.SystemProbe().GetString(sysconfig.FullKeyPath(evNS, "socket")), + EventServerBurst: pkgconfigsetup.SystemProbe().GetInt(sysconfig.FullKeyPath(evNS, "event_server.burst")), // consumers ProcessConsumerEnabled: getBool("process.enabled"), } } -func join(pieces ...string) string { - return strings.Join(pieces, ".") -} - func getAllKeys(key string) (string, string) { - deprecatedKey := strings.Join([]string{rsNS, key}, ".") - newKey := strings.Join([]string{evNS, key}, ".") + deprecatedKey := sysconfig.FullKeyPath(rsNS, key) + newKey := sysconfig.FullKeyPath(evNS, key) return deprecatedKey, newKey } diff --git a/pkg/eventmonitor/eventmonitor.go b/pkg/eventmonitor/eventmonitor.go index a0b4a84e2cc9f..ce93880272057 100644 --- a/pkg/eventmonitor/eventmonitor.go +++ b/pkg/eventmonitor/eventmonitor.go @@ -11,7 +11,6 @@ package eventmonitor import ( "context" "fmt" - "net" "slices" "sync" "time" @@ -55,7 +54,6 @@ type EventMonitor struct { cancelFnc context.CancelFunc sendStatsChan chan chan bool eventConsumers []EventConsumerInterface - netListener net.Listener wg sync.WaitGroup } @@ -108,8 +106,6 @@ func (m *EventMonitor) Start() error { return fmt.Errorf("unable to register event monitoring module: %w", err) } - m.netListener = ln - m.wg.Add(1) go func() { defer m.wg.Done() @@ -169,17 +165,17 @@ func (m *EventMonitor) Close() { m.GRPCServer.Stop() } - if m.netListener != nil { - m.netListener.Close() + if err := m.cleanup(); err != nil { + seclog.Errorf("failed to cleanup event monitor: %v", err) } - m.cleanup() - m.cancelFnc() m.wg.Wait() // all the go routines should be stopped now we can safely call close the probe and remove the eBPF programs - m.Probe.Close() + if err := m.Probe.Close(); err != nil { + seclog.Errorf("failed to close event monitor probe: %v", err) + } } // SendStats send stats diff --git a/pkg/eventmonitor/eventmonitor_linux.go b/pkg/eventmonitor/eventmonitor_linux.go index cae6dff3153df..5ef2576dce7db 100644 --- a/pkg/eventmonitor/eventmonitor_linux.go +++ b/pkg/eventmonitor/eventmonitor_linux.go @@ -26,10 +26,15 @@ func (m *EventMonitor) getListener() (net.Listener, error) { func (m *EventMonitor) init() error { // force socket cleanup of previous socket not cleanup - os.Remove(m.Config.SocketPath) + if err := os.Remove(m.Config.SocketPath); err != nil && !os.IsNotExist(err) { + return err + } return nil } 
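Both `init` and `cleanup` now treat a missing socket file as success and only surface real failures. A generic sketch of that idempotent-removal pattern (the helper name and path are illustrative, not part of the diff):

```go
package main

import "os"

// removeIfExists deletes path but treats "already gone" as success,
// keeping repeated cleanup calls idempotent.
func removeIfExists(path string) error {
	if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

func main() {
	_ = removeIfExists("/tmp/example.sock") // hypothetical socket path
}
```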
-func (m *EventMonitor) cleanup() { - os.Remove(m.Config.SocketPath) +func (m *EventMonitor) cleanup() error { + if err := os.Remove(m.Config.SocketPath); err != nil && !os.IsNotExist(err) { + return err + } + return nil } diff --git a/pkg/eventmonitor/eventmonitor_windows.go b/pkg/eventmonitor/eventmonitor_windows.go index e6263e7b9a7f1..84d88bf4e4f56 100644 --- a/pkg/eventmonitor/eventmonitor_windows.go +++ b/pkg/eventmonitor/eventmonitor_windows.go @@ -19,4 +19,6 @@ func (m *EventMonitor) init() error { return nil } -func (m *EventMonitor) cleanup() {} +func (m *EventMonitor) cleanup() error { + return nil +} diff --git a/pkg/fleet/installer/installer.go b/pkg/fleet/installer/installer.go index 9d39d9719e4b6..88a08f12d13cd 100644 --- a/pkg/fleet/installer/installer.go +++ b/pkg/fleet/installer/installer.go @@ -186,6 +186,10 @@ func (i *installerImpl) Install(ctx context.Context, url string, args []string) return fmt.Errorf("could not create temporary directory: %w", err) } defer os.RemoveAll(tmpDir) + err = i.db.DeletePackage(pkg.Name) + if err != nil { + return fmt.Errorf("could not remove package installation in db: %w", err) + } configDir := filepath.Join(i.userConfigsDir, pkg.Name) err = pkg.ExtractLayers(oci.DatadogPackageLayerMediaType, tmpDir) if err != nil { diff --git a/pkg/fleet/installer/installer_test.go b/pkg/fleet/installer/installer_test.go index 069c2e04cf9e8..8a87c4b51e3ee 100644 --- a/pkg/fleet/installer/installer_test.go +++ b/pkg/fleet/installer/installer_test.go @@ -9,8 +9,10 @@ import ( "context" "io/fs" "os" + "path" "path/filepath" "testing" + "time" "github.com/stretchr/testify/assert" @@ -122,3 +124,83 @@ func TestUninstallExperiment(t *testing.T) { // we do not rollback configuration examples to their previous versions currently fixtures.AssertEqualFS(t, s.ConfigFS(fixtures.FixtureSimpleV2), installer.ConfigFS(fixtures.FixtureSimpleV2)) } + +func TestInstallSkippedWhenAlreadyInstalled(t *testing.T) { + s := fixtures.NewServer(t) + installer := newTestPackageManager(t, s, t.TempDir(), t.TempDir()) + defer installer.db.Close() + + err := installer.Install(testCtx, s.PackageURL(fixtures.FixtureSimpleV1), nil) + assert.NoError(t, err) + r := installer.packages.Get(fixtures.FixtureSimpleV1.Package) + lastModTime, err := latestModTimeFS(r.StableFS(), ".") + assert.NoError(t, err) + + err = installer.Install(testCtx, s.PackageURL(fixtures.FixtureSimpleV1), nil) + assert.NoError(t, err) + r = installer.packages.Get(fixtures.FixtureSimpleV1.Package) + newLastModTime, err := latestModTimeFS(r.StableFS(), ".") + assert.NoError(t, err) + assert.Equal(t, lastModTime, newLastModTime) +} + +func TestReinstallAfterDBClean(t *testing.T) { + s := fixtures.NewServer(t) + installer := newTestPackageManager(t, s, t.TempDir(), t.TempDir()) + defer installer.db.Close() + + err := installer.Install(testCtx, s.PackageURL(fixtures.FixtureSimpleV1), nil) + assert.NoError(t, err) + r := installer.packages.Get(fixtures.FixtureSimpleV1.Package) + lastModTime, err := latestModTimeFS(r.StableFS(), ".") + assert.NoError(t, err) + + installer.db.DeletePackage(fixtures.FixtureSimpleV1.Package) + + err = installer.Install(testCtx, s.PackageURL(fixtures.FixtureSimpleV1), nil) + assert.NoError(t, err) + r = installer.packages.Get(fixtures.FixtureSimpleV1.Package) + newLastModTime, err := latestModTimeFS(r.StableFS(), ".") + assert.NoError(t, err) + assert.NotEqual(t, lastModTime, newLastModTime) +} + +func latestModTimeFS(fsys fs.FS, dirPath string) (time.Time, error) { + var latestTime time.Time 
+ + // Read the directory entries + entries, err := fs.ReadDir(fsys, dirPath) + if err != nil { + return latestTime, err + } + + for _, entry := range entries { + // Get full path of the entry + entryPath := path.Join(dirPath, entry.Name()) + + // Get file info to access modification time + info, err := fs.Stat(fsys, entryPath) + if err != nil { + return latestTime, err + } + + // Update the latest modification time + if info.ModTime().After(latestTime) { + latestTime = info.ModTime() + } + + // If the entry is a directory, recurse into it + if entry.IsDir() { + subLatestTime, err := latestModTimeFS(fsys, entryPath) // Recurse into subdirectory + if err != nil { + return latestTime, err + } + // Compare times + if subLatestTime.After(latestTime) { + latestTime = subLatestTime + } + } + } + + return latestTime, nil +} diff --git a/pkg/fleet/internal/cdn/config_datadog_agent.go b/pkg/fleet/internal/cdn/config_datadog_agent.go index 473e3bc67e843..3699ab216791e 100644 --- a/pkg/fleet/internal/cdn/config_datadog_agent.go +++ b/pkg/fleet/internal/cdn/config_datadog_agent.go @@ -151,25 +151,37 @@ func (a *agentConfig) Write(dir string) error { if a.datadog != nil { err = os.WriteFile(filepath.Join(dir, configDatadogYAML), []byte(a.datadog), 0640) if err != nil { - return fmt.Errorf("could not write datadog.yaml: %w", err) + return fmt.Errorf("could not write %s: %w", configDatadogYAML, err) } if runtime.GOOS != "windows" { err = os.Chown(filepath.Join(dir, configDatadogYAML), ddAgentUID, ddAgentGID) if err != nil { - return fmt.Errorf("could not chown datadog.yaml: %w", err) + return fmt.Errorf("could not chown %s: %w", configDatadogYAML, err) } } } if a.securityAgent != nil { - err = os.WriteFile(filepath.Join(dir, configSecurityAgentYAML), []byte(a.securityAgent), 0600) + err = os.WriteFile(filepath.Join(dir, configSecurityAgentYAML), []byte(a.securityAgent), 0440) if err != nil { - return fmt.Errorf("could not write datadog.yaml: %w", err) + return fmt.Errorf("could not write %s: %w", configSecurityAgentYAML, err) + } + if runtime.GOOS != "windows" { + err = os.Chown(filepath.Join(dir, configSecurityAgentYAML), 0, ddAgentGID) // root:dd-agent + if err != nil { + return fmt.Errorf("could not chown %s: %w", configSecurityAgentYAML, err) + } } } if a.systemProbe != nil { - err = os.WriteFile(filepath.Join(dir, configSystemProbeYAML), []byte(a.systemProbe), 0600) + err = os.WriteFile(filepath.Join(dir, configSystemProbeYAML), []byte(a.systemProbe), 0440) if err != nil { - return fmt.Errorf("could not write datadog.yaml: %w", err) + return fmt.Errorf("could not write %s: %w", configSystemProbeYAML, err) + } + if runtime.GOOS != "windows" { + err = os.Chown(filepath.Join(dir, configSystemProbeYAML), 0, ddAgentGID) // root:dd-agent + if err != nil { + return fmt.Errorf("could not chown %s: %w", configSystemProbeYAML, err) + } } } return nil }
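The agent-managed configs are now written with mode 0440 and chowned root:dd-agent, so only the agent group can read them and nobody can write them in place. A hedged sketch of the write-then-chown pattern (the function name, path, and GID value are illustrative; the diff resolves ddAgentGID elsewhere):

```go
package main

import (
	"os"
	"path/filepath"
	"runtime"
)

// writeAgentConfig writes a config file readable only by root and the
// dd-agent group, mirroring the 0440 + chown pattern above.
func writeAgentConfig(dir, name string, content []byte, ddAgentGID int) error {
	path := filepath.Join(dir, name)
	if err := os.WriteFile(path, content, 0440); err != nil {
		return err
	}
	if runtime.GOOS != "windows" {
		return os.Chown(path, 0, ddAgentGID) // root:dd-agent
	}
	return nil
}

func main() {
	_ = writeAgentConfig(os.TempDir(), "system-probe.yaml", []byte("enabled: true"), 0)
}
```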
diff --git a/pkg/gpu/config.go b/pkg/gpu/config.go index 995fbde58cbd9..d8beee7591dff 100644 --- a/pkg/gpu/config.go +++ b/pkg/gpu/config.go @@ -7,26 +7,34 @@ package gpu import ( + sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "time" "github.com/DataDog/datadog-agent/pkg/ebpf" ) -// GPUConfigNS is the namespace for the GPU monitoring probe. -const GPUConfigNS = "gpu_monitoring" +// GPUNS is the namespace for the GPU monitoring probe. +const GPUNS = "gpu_monitoring" // Config holds the configuration for the GPU monitoring probe. type Config struct { - *ebpf.Config + ebpf.Config + // ScanTerminatedProcessesInterval is the interval at which the probe scans for terminated processes. ScanTerminatedProcessesInterval time.Duration - InitialProcessSync bool + // InitialProcessSync indicates whether the probe should sync the process list on startup. + InitialProcessSync bool + // NVMLLibraryPath is the path of the native libnvidia-ml.so library + NVMLLibraryPath string } // NewConfig generates a new configuration for the GPU monitoring probe. func NewConfig() *Config { + spCfg := pkgconfigsetup.SystemProbe() return &Config{ - Config: ebpf.NewConfig(), - ScanTerminatedProcessesInterval: 5 * time.Second, - InitialProcessSync: true, + Config: *ebpf.NewConfig(), + ScanTerminatedProcessesInterval: time.Duration(spCfg.GetInt(sysconfig.FullKeyPath(GPUNS, "process_scan_interval_seconds"))) * time.Second, + InitialProcessSync: spCfg.GetBool(sysconfig.FullKeyPath(GPUNS, "initial_process_sync")), + NVMLLibraryPath: spCfg.GetString(sysconfig.FullKeyPath(GPUNS, "nvml_lib_path")), } } diff --git a/pkg/gpu/context.go b/pkg/gpu/context.go new file mode 100644 index 0000000000000..ebd66116b48c2 --- /dev/null +++ b/pkg/gpu/context.go @@ -0,0 +1,59 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package gpu + +import ( + "fmt" + + "github.com/NVIDIA/go-nvml/pkg/nvml" + + sectime "github.com/DataDog/datadog-agent/pkg/security/resolvers/time" +) + +// systemContext holds certain attributes about the system that are used by the GPU probe. +type systemContext struct { + // maxGpuThreadsPerDevice maps each device index to the maximum number of threads it can run in parallel + maxGpuThreadsPerDevice map[int]int + + // timeResolver resolves kernel-time timestamps + timeResolver *sectime.Resolver + + // nvmlLib is the NVML library used to query GPU devices + nvmlLib nvml.Interface +} + +func getSystemContext(nvmlLib nvml.Interface) (*systemContext, error) { + ctx := &systemContext{ + maxGpuThreadsPerDevice: make(map[int]int), + nvmlLib: nvmlLib, + } + + if err := ctx.queryDevices(); err != nil { + return nil, fmt.Errorf("error querying devices: %w", err) + } + + return ctx, nil +} + +func (ctx *systemContext) queryDevices() error { + devices, err := getGPUDevices(ctx.nvmlLib) + if err != nil { + return fmt.Errorf("error getting GPU devices: %w", err) + } + + for i, device := range devices { + maxThreads, err := getMaxThreadsForDevice(device) + if err != nil { + return fmt.Errorf("error getting max threads for device %s: %w", device, err) + } + + ctx.maxGpuThreadsPerDevice[i] = maxThreads + } + + return nil +}
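getSystemContext queries every device once at startup, so per-event code can do a map lookup instead of an NVML call. A hypothetical in-package usage sketch (the helper name is illustrative; nvmlLib would be the real library or the testutil mock added later in this diff):

```go
// Hypothetical in-package helper; not part of the diff.
func maxThreadsForFirstDevice(nvmlLib nvml.Interface) (int, error) {
	sysCtx, err := getSystemContext(nvmlLib)
	if err != nil {
		return 0, err
	}
	// queryDevices cached the per-device core counts at startup.
	return sysCtx.maxGpuThreadsPerDevice[0], nil
}
```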
diff --git a/pkg/gpu/nvml.go b/pkg/gpu/nvml.go new file mode 100644 index 0000000000000..0343701df4167 --- /dev/null +++ b/pkg/gpu/nvml.go @@ -0,0 +1,59 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux_bpf + +// Package gpu defines the agent corecheck for +// the GPU integration +package gpu + +import ( + "errors" + "fmt" + + "github.com/NVIDIA/go-nvml/pkg/nvml" +) + +func wrapNvmlError(ret nvml.Return) error { + if ret == nvml.SUCCESS { + return nil + } + + return errors.New(nvml.ErrorString(ret)) +} + +func getGPUDevices(lib nvml.Interface) ([]nvml.Device, error) { + count, ret := lib.DeviceGetCount() + if err := wrapNvmlError(ret); err != nil { + return nil, fmt.Errorf("cannot get number of GPU devices: %w", err) + } + + var devices []nvml.Device + + for i := 0; i < count; i++ { + device, ret := lib.DeviceGetHandleByIndex(i) + if err := wrapNvmlError(ret); err != nil { + return nil, fmt.Errorf("cannot get handle for GPU device %d: %w", i, err) + } + + devices = append(devices, device) + } + + return devices, nil +} + +// getMaxThreadsForDevice returns the maximum number of threads that can be run on the +// GPU. Each GPU core runs a thread, so this is the number of cores. Do not +// confuse the number of cores with the number of streaming multiprocessors +// (SM): the number of cores is equal to the number of SMs multiplied by the +// number of cores per SM. +func getMaxThreadsForDevice(device nvml.Device) (int, error) { + cores, ret := device.GetNumGpuCores() + if err := wrapNvmlError(ret); err != nil { + return 0, fmt.Errorf("cannot get number of GPU cores: %w", err) + } + + return int(cores), nil +} diff --git a/pkg/gpu/probe.go b/pkg/gpu/probe.go index 1f0a287287fdd..905120afc82a0 100644 --- a/pkg/gpu/probe.go +++ b/pkg/gpu/probe.go @@ -13,6 +13,7 @@ import ( "regexp" manager "github.com/DataDog/ebpf-manager" + "github.com/NVIDIA/go-nvml/pkg/nvml" "github.com/cilium/ebpf" "github.com/cilium/ebpf/rlimit" @@ -36,16 +37,27 @@ const ( const consumerChannelSize = 4096 +// ProbeDependencies holds the dependencies for the probe +type ProbeDependencies struct { + // Telemetry is the telemetry component + Telemetry telemetry.Component + + // NvmlLib is the NVML library interface + NvmlLib nvml.Interface +} + // Probe represents the GPU monitoring probe type Probe struct { mgr *ddebpf.Manager cfg *Config consumer *cudaEventConsumer attacher *uprobes.UprobeAttacher + deps ProbeDependencies + sysCtx *systemContext } // NewProbe starts the GPU monitoring probe -func NewProbe(cfg *Config, telemetryComponent telemetry.Component) (*Probe, error) { +func NewProbe(cfg *Config, deps ProbeDependencies) (*Probe, error) { log.Debugf("starting GPU monitoring probe...") kv, err := kernel.HostVersion() if err != nil { @@ -62,7 +74,7 @@ } err = ddebpf.LoadCOREAsset(filename, func(buf bytecode.AssetReader, opts manager.Options) error { var err error - probe, err = startGPUProbe(buf, opts, telemetryComponent, cfg) + probe, err = startGPUProbe(buf, opts, deps, cfg) if err != nil { return fmt.Errorf("cannot start GPU monitoring probe: %s", err) } @@ -76,7 +88,7 @@ func NewProbe(cfg *Config, telemetryComponent telemetry.Component) (*Probe, erro return probe, nil } -func startGPUProbe(buf bytecode.AssetReader, opts manager.Options, _ telemetry.Component, cfg *Config) (*Probe, error) { +func startGPUProbe(buf bytecode.AssetReader, opts manager.Options, deps ProbeDependencies, cfg *Config) (*Probe, error) { mgr := ddebpf.NewManagerWithDefault(&manager.Manager{ Maps: []*manager.Map{ {Name: cudaAllocCacheMap}, @@ -121,7 +133,7 @@ func startGPUProbe(buf bytecode.AssetReader, opts manager.Options, _ telemetry.C }, }, }, -
EbpfConfig: cfg.Config, + EbpfConfig: &cfg.Config, PerformInitialScan: cfg.InitialProcessSync, } @@ -138,6 +150,12 @@ func startGPUProbe(buf bytecode.AssetReader, opts manager.Options, _ telemetry.C mgr: mgr, cfg: cfg, attacher: attacher, + deps: deps, + } + + p.sysCtx, err = getSystemContext(deps.NvmlLib) + if err != nil { + return nil, fmt.Errorf("error getting system context: %w", err) } p.startEventConsumer() diff --git a/pkg/gpu/probe_stub.go b/pkg/gpu/probe_stub.go index 534786a31aba6..f4cbbae2cf57a 100644 --- a/pkg/gpu/probe_stub.go +++ b/pkg/gpu/probe_stub.go @@ -3,21 +3,29 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2024-present Datadog, Inc. -//go:build !linux_bpf +//go:build !linux_bpf && linux package gpu import ( + "github.com/NVIDIA/go-nvml/pkg/nvml" + "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/gpu/model" "github.com/DataDog/datadog-agent/pkg/ebpf" ) +// ProbeDependencies holds the dependencies for the probe +type ProbeDependencies struct { + Telemetry telemetry.Component + NvmlLib nvml.Interface +} + // Probe is not implemented on non-linux systems type Probe struct{} // NewProbe is not implemented on non-linux systems -func NewProbe(_ *Config, _ telemetry.Component) (*Probe, error) { +func NewProbe(_ *Config, _ ProbeDependencies) (*Probe, error) { return nil, ebpf.ErrNotImplemented } diff --git a/pkg/gpu/probe_test.go b/pkg/gpu/probe_test.go index 9b0e560fd36c6..c86a1d0ac11e8 100644 --- a/pkg/gpu/probe_test.go +++ b/pkg/gpu/probe_test.go @@ -26,7 +26,8 @@ func TestProbeCanLoad(t *testing.T) { t.Skipf("minimum kernel version %s not met, read %s", minimumKernelVersion, kver) } - probe, err := NewProbe(NewConfig(), nil) + nvmlMock := testutil.GetBasicNvmlMock() + probe, err := NewProbe(NewConfig(), ProbeDependencies{NvmlLib: nvmlMock}) require.NoError(t, err) require.NotNil(t, probe) t.Cleanup(probe.Close) @@ -46,11 +47,15 @@ func TestProbeCanReceiveEvents(t *testing.T) { procMon := monitor.GetProcessMonitor() require.NotNil(t, procMon) require.NoError(t, procMon.Initialize(false)) + t.Cleanup(procMon.Stop) cfg := NewConfig() cfg.InitialProcessSync = false cfg.BPFDebug = true - probe, err := NewProbe(cfg, nil) + + nvmlMock := testutil.GetBasicNvmlMock() + + probe, err := NewProbe(cfg, ProbeDependencies{NvmlLib: nvmlMock}) require.NoError(t, err) require.NotNil(t, probe) t.Cleanup(probe.Close) diff --git a/pkg/gpu/testutil/mocks.go b/pkg/gpu/testutil/mocks.go new file mode 100644 index 0000000000000..e414973d509c0 --- /dev/null +++ b/pkg/gpu/testutil/mocks.go @@ -0,0 +1,33 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux + +package testutil + +import ( + "github.com/NVIDIA/go-nvml/pkg/nvml" + nvmlmock "github.com/NVIDIA/go-nvml/pkg/nvml/mock" +) + +// DefaultGpuCores is the default number of cores for a GPU device in the mock. +const DefaultGpuCores = 10 + +// GetBasicNvmlMock returns a mock of the nvml.Interface with a single device with 10 cores, +// useful for basic tests that need only the basic interaction with NVML to be working. 
+func GetBasicNvmlMock() *nvmlmock.Interface { + return &nvmlmock.Interface{ + DeviceGetCountFunc: func() (int, nvml.Return) { + return 1, nvml.SUCCESS + }, + DeviceGetHandleByIndexFunc: func(int) (nvml.Device, nvml.Return) { + return &nvmlmock.Device{ + GetNumGpuCoresFunc: func() (int, nvml.Return) { + return DefaultGpuCores, nvml.SUCCESS + }, + }, nvml.SUCCESS + }, + } +} diff --git a/pkg/logs/launchers/integration/launcher.go b/pkg/logs/launchers/integration/launcher.go index b822e90eabad7..de88863114936 100644 --- a/pkg/logs/launchers/integration/launcher.go +++ b/pkg/logs/launchers/integration/launcher.go @@ -50,7 +50,7 @@ type Launcher struct { // fileInfo stores information about each file that is needed in order to keep // track of the combined and overall disk usage by the logs files type fileInfo struct { - filename string + fileWithPath string lastModified time.Time size int64 } @@ -149,7 +149,7 @@ func (s *Launcher) run() { s.integrationToFile[cfg.IntegrationID] = logFile } - filetypeSource := s.makeFileSource(source, logFile.filename) + filetypeSource := s.makeFileSource(source, logFile.fileWithPath) s.sources.AddSource(filetypeSource) } } @@ -179,8 +179,17 @@ func (s *Launcher) receiveLogs(log integrations.IntegrationLog) { // Ensure the individual file doesn't exceed integrations_logs_files_max_size // Add 1 because we write the \n at the end as well logSize := int64(len(log.Log)) + 1 + + if logSize > s.fileSizeMax { + ddLog.Warnf("Individual log size (%d bytes) is larger than maximum allowable file size (%d bytes), skipping writing to log file: %s", logSize, s.fileSizeMax, log.Log) + return + } else if logSize > s.combinedUsageMax { + ddLog.Warnf("Individual log size (%d bytes) is larger than maximum combined log file usage (%d bytes), skipping writing to log file: %s", logSize, s.combinedUsageMax, log.Log) + return + } + if fileToUpdate.size+logSize > s.fileSizeMax { - file, err := os.Create(fileToUpdate.filename) + file, err := os.Create(fileToUpdate.fileWithPath) if err != nil { ddLog.Error("Failed to delete and remake oversize file:", err) return @@ -211,7 +220,7 @@ func (s *Launcher) receiveLogs(log integrations.IntegrationLog) { return } - file, err := os.Create(leastRecentlyModifiedFile.filename) + file, err := os.Create(leastRecentlyModifiedFile.fileWithPath) if err != nil { ddLog.Error("Error creating log file:", err) continue @@ -223,7 +232,7 @@ func (s *Launcher) receiveLogs(log integrations.IntegrationLog) { } } - err := s.writeLogToFileFunction(filepath.Join(s.runPath, fileToUpdate.filename), log.Log) + err := s.writeLogToFileFunction(fileToUpdate.fileWithPath, log.Log) if err != nil { ddLog.Warn("Error writing log to file:", err) return @@ -236,12 +245,11 @@ func (s *Launcher) receiveLogs(log integrations.IntegrationLog) { } func (s *Launcher) deleteFile(file *fileInfo) error { - filename := filepath.Join(s.runPath, file.filename) - err := os.Remove(filename) + err := os.Remove(file.fileWithPath) if err != nil { return err } - ddLog.Info("Successfully deleted log file:", filename) + ddLog.Info("Successfully deleted log file:", file.fileWithPath) s.combinedUsageSize -= file.size @@ -321,7 +329,7 @@ func (s *Launcher) createFile(source string) (*fileInfo, error) { } fileInfo := &fileInfo{ - filename: filepath, + fileWithPath: filepath, lastModified: time.Now(), size: 0, } @@ -349,8 +357,8 @@ func computeMaxDiskUsage(runPath string, logsTotalUsageSetting int64, usageRatio diskReserved := float64(usage.Total) * (1 - usageRatio) diskAvailable := int64(usage.Available)
- int64(math.Ceil(diskReserved)) - if diskAvailable < 0 { - ddLog.Warn("Available disk calculated as less than 0: ", diskAvailable, ". Disk reserved:", diskReserved) + if diskAvailable <= 0 { + ddLog.Warnf("Available disk calculated as %d bytes, disk reserved is %f bytes. Check %s and make sure there is enough free space on disk", diskAvailable, diskReserved, "integrations_logs_disk_ratio") diskAvailable = 0 } @@ -370,12 +378,12 @@ func (s *Launcher) scanInitialFiles(dir string) error { } fileInfo := &fileInfo{ - filename: info.Name(), + fileWithPath: filepath.Join(dir, info.Name()), size: info.Size(), lastModified: info.ModTime(), } - integrationID := fileNameToID(fileInfo.filename) + integrationID := fileNameToID(fileInfo.fileWithPath) s.integrationToFile[integrationID] = fileInfo s.combinedUsageSize += info.Size() diff --git a/pkg/logs/launchers/integration/launcher_test.go b/pkg/logs/launchers/integration/launcher_test.go index 21d5c293ff860..fd0c543d3b21a 100644 --- a/pkg/logs/launchers/integration/launcher_test.go +++ b/pkg/logs/launchers/integration/launcher_test.go @@ -6,6 +6,7 @@ package integration import ( + "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" "os" "path/filepath" "testing" @@ -93,7 +94,7 @@ func (suite *LauncherTestSuite) TestSendLog() { assert.Equal(suite.T(), foundSource.Config.Type, config.FileType) assert.Equal(suite.T(), foundSource.Config.Source, "foo") assert.Equal(suite.T(), foundSource.Config.Service, "bar") - expectedPath := filepath.Join(suite.s.runPath, suite.s.integrationToFile[id].filename) + expectedPath := suite.s.integrationToFile[id].fileWithPath assert.Equal(suite.T(), logSample, <-fileLogChan) assert.Equal(suite.T(), expectedPath, <-filepathChan) @@ -104,7 +105,7 @@ func (suite *LauncherTestSuite) TestSendLog() { func (suite *LauncherTestSuite) TestNegativeCombinedUsageMax() { suite.s.combinedUsageMax = -1 err := suite.s.scanInitialFiles(suite.s.runPath) - assert.NotNil(suite.T(), err) + assert.Error(suite.T(), err) } // TestZeroCombinedUsageMax ensures the launcher won't panic when @@ -113,9 +114,9 @@ func (suite *LauncherTestSuite) TestZeroCombinedUsageMaxFileCreated() { suite.s.combinedUsageMax = 0 filename := "sample_integration_123.log" - filepath := filepath.Join(suite.s.runPath, filename) - file, err := os.Create(filepath) - assert.Nil(suite.T(), err) + fileWithPath := filepath.Join(suite.s.runPath, filename) + file, err := os.Create(fileWithPath) + assert.NoError(suite.T(), err) file.Close() @@ -143,44 +144,53 @@ func (suite *LauncherTestSuite) TestZeroCombinedUsageMaxFileNotCreated() { } func (suite *LauncherTestSuite) TestSmallCombinedUsageMax() { - suite.s.combinedUsageMax = 10 + suite.s.combinedUsageMax = 15 filename := "sample_integration_123.log" - filepath := filepath.Join(suite.s.runPath, filename) - file, err := os.Create(filepath) - assert.Nil(suite.T(), err) + fileWithPath := filepath.Join(suite.s.runPath, filename) + file, err := os.Create(fileWithPath) + assert.NoError(suite.T(), err) file.Close() suite.s.Start(nil, nil, nil, nil) // Launcher should write this log - writtenLog := "sample" + shortLog := "sample" integrationLog := integrations.IntegrationLog{ - Log: writtenLog, + Log: shortLog, IntegrationID: "sample_integration:123", } suite.s.receiveLogs(integrationLog) - fileStat, err := os.Stat(filepath) - assert.Nil(suite.T(), err) - assert.Equal(suite.T(), fileStat.Size(), int64(len(writtenLog)+1)) + fileStat, err := os.Stat(fileWithPath) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), 
fileStat.Size(), int64(len(shortLog)+1)) - // Launcher should delete file for this log - unwrittenLog := "sample log two" + // Launcher should delete and remake the log file for this log since it would break combinedUsageMax threshold + longLog := "sample log two" integrationLogTwo := integrations.IntegrationLog{ - Log: unwrittenLog, + Log: longLog, IntegrationID: "sample_integration:123", } suite.s.receiveLogs(integrationLogTwo) + _, err = os.Stat(fileWithPath) + assert.NoError(suite.T(), err) - _, err = os.Stat(filepath) - assert.True(suite.T(), os.IsNotExist(err)) + // Launcher should skip writing this log since it's larger than combinedUsageMax + unwrittenLog := "this log is too long" + unwrittenIntegrationLog := integrations.IntegrationLog{ + Log: unwrittenLog, + IntegrationID: "sample_integration:123", + } + suite.s.receiveLogs(unwrittenIntegrationLog) + _, err = os.Stat(fileWithPath) + assert.NoError(suite.T(), err) // Remake the file suite.s.receiveLogs(integrationLog) - fileStat, err = os.Stat(filepath) - assert.Nil(suite.T(), err) - assert.Equal(suite.T(), fileStat.Size(), int64(len(writtenLog)+1)) + fileStat, err = os.Stat(fileWithPath) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), fileStat.Size(), int64(len(shortLog)+1)) } func (suite *LauncherTestSuite) TestWriteLogToFile() { @@ -215,13 +225,13 @@ func (suite *LauncherTestSuite) TestWriteMultipleLogsToFile() { // TestDeleteFile tests that deleteFile properly deletes the correct file func (suite *LauncherTestSuite) TestDeleteFile() { filename := "testfile.log" - filepath := filepath.Join(suite.s.runPath, filename) - file, err := os.Create(filepath) - fileinfo := &fileInfo{filename: filename, size: int64(0)} - assert.Nil(suite.T(), err) + fileWithPath := filepath.Join(suite.s.runPath, filename) + file, err := os.Create(fileWithPath) + fileinfo := &fileInfo{fileWithPath: fileWithPath, size: int64(0)} + assert.NoError(suite.T(), err) - info, err := os.Stat(filepath) - assert.Nil(suite.T(), err) + info, err := os.Stat(fileWithPath) + assert.NoError(suite.T(), err) assert.Equal(suite.T(), int64(0), info.Size(), "Newly created file size not zero") // Write data the file and make sure ensureFileSize deletes the file for being too large @@ -229,14 +239,14 @@ func (suite *LauncherTestSuite) TestDeleteFile() { file.Write(data) file.Close() - info, err = os.Stat(filepath) - assert.Nil(suite.T(), err) + info, err = os.Stat(fileWithPath) + assert.NoError(suite.T(), err) assert.Equal(suite.T(), int64(2*1024*1024), info.Size()) err = suite.s.deleteFile(fileinfo) - assert.Nil(suite.T(), err) + assert.NoError(suite.T(), err) - _, err = os.Stat(filepath) + _, err = os.Stat(fileWithPath) assert.True(suite.T(), os.IsNotExist(err)) } @@ -281,9 +291,9 @@ func (suite *LauncherTestSuite) TestFileExceedsSingleFileLimit() { suite.s.fileSizeMax = oneMB filename := "sample_integration_123.log" - filepath := filepath.Join(suite.s.runPath, filename) - file, err := os.Create(filepath) - assert.Nil(suite.T(), err) + fileWithPath := filepath.Join(suite.s.runPath, filename) + file, err := os.Create(fileWithPath) + assert.NoError(suite.T(), err) file.Write(make([]byte, oneMB)) file.Close() @@ -308,8 +318,9 @@ func (suite *LauncherTestSuite) TestScanInitialFiles() { filename := "sample_integration_123.log" fileSize := int64(1 * 1024 * 1024) - file, err := os.Create(filepath.Join(suite.s.runPath, filename)) - assert.Nil(suite.T(), err) + fileWithPath := filepath.Join(suite.s.runPath, filename) + file, err := os.Create(fileWithPath) + 
assert.NoError(suite.T(), err) data := make([]byte, fileSize) file.Write(data) @@ -320,7 +331,7 @@ func (suite *LauncherTestSuite) TestScanInitialFiles() { actualFileInfo := suite.s.integrationToFile[fileID] assert.NotEmpty(suite.T(), suite.s.integrationToFile) - assert.Equal(suite.T(), actualFileInfo.filename, filename) + assert.Equal(suite.T(), actualFileInfo.fileWithPath, fileWithPath) assert.Equal(suite.T(), fileSize, actualFileInfo.size) assert.Equal(suite.T(), fileSize, suite.s.combinedUsageSize) } @@ -331,8 +342,9 @@ func (suite *LauncherTestSuite) TestCreateFileAfterScanInitialFile() { filename := "sample_integration_123.log" fileSize := int64(1 * 1024 * 1024) - file, err := os.Create(filepath.Join(suite.s.runPath, filename)) - assert.Nil(suite.T(), err) + fileWithPath := filepath.Join(suite.s.runPath, filename) + file, err := os.Create(fileWithPath) + assert.NoError(suite.T(), err) data := make([]byte, fileSize) file.Write(data) @@ -343,7 +355,7 @@ func (suite *LauncherTestSuite) TestCreateFileAfterScanInitialFile() { scannedFile := suite.s.integrationToFile[fileID] assert.NotEmpty(suite.T(), suite.s.integrationToFile) - assert.Equal(suite.T(), filename, scannedFile.filename) + assert.Equal(suite.T(), fileWithPath, scannedFile.fileWithPath) assert.Equal(suite.T(), fileSize, scannedFile.size) assert.Equal(suite.T(), fileSize, suite.s.combinedUsageSize) @@ -440,9 +452,9 @@ func (suite *LauncherTestSuite) TestInitialLogsExceedTotalUsageMultipleFiles() { dataOneMB := make([]byte, oneMB) file1, err := os.Create(filepath.Join(suite.s.runPath, filename1)) - assert.Nil(suite.T(), err) + assert.NoError(suite.T(), err) file2, err := os.Create(filepath.Join(suite.s.runPath, filename2)) - assert.Nil(suite.T(), err) + assert.NoError(suite.T(), err) file1.Write(dataOneMB) file2.Write(dataOneMB) @@ -465,7 +477,7 @@ func (suite *LauncherTestSuite) TestInitialLogExceedsTotalUsageSingleFile() { dataTwoMB := make([]byte, 2*oneMB) file, err := os.Create(filepath.Join(suite.s.runPath, filename)) - assert.Nil(suite.T(), err) + assert.NoError(suite.T(), err) file.Write(dataTwoMB) file.Close() @@ -481,9 +493,9 @@ func (suite *LauncherTestSuite) TestInitialLogExceedsTotalUsageSingleFile() { // large func (suite *LauncherTestSuite) TestScanInitialFilesDeletesProperly() { err := os.RemoveAll(suite.s.runPath) - assert.Nil(suite.T(), err) + assert.NoError(suite.T(), err) os.MkdirAll(suite.s.runPath, 0755) - assert.Nil(suite.T(), err) + assert.NoError(suite.T(), err) oneMB := int64(1 * 1024 * 1024) suite.s.combinedUsageMax = oneMB @@ -493,9 +505,9 @@ func (suite *LauncherTestSuite) TestScanInitialFilesDeletesProperly() { name := filepath.Join(suite.s.runPath, filename1) file1, err := os.Create(name) - assert.Nil(suite.T(), err) + assert.NoError(suite.T(), err) file2, err := os.Create(filepath.Join(suite.s.runPath, filename2)) - assert.Nil(suite.T(), err) + assert.NoError(suite.T(), err) dataOneMB := make([]byte, oneMB) file1.Write(dataOneMB) @@ -507,7 +519,7 @@ func (suite *LauncherTestSuite) TestScanInitialFilesDeletesProperly() { // make sure there is only one file in the directory files, err := os.ReadDir(suite.s.runPath) - assert.Nil(suite.T(), err) + assert.NoError(suite.T(), err) fileCount := 0 for _, file := range files { @@ -526,9 +538,18 @@ func TestLauncherTestSuite(t *testing.T) { // TestReadOnlyFileSystem ensures the launcher doesn't panic in a read-only // file system. There will be errors but it should handle them gracefully. 
func TestReadOnlyFileSystem(t *testing.T) { + /* + Currently this test fails randomly with: + 1729313171735561634 [Info] Successfully created integrations log file: /tmp/TestReadOnlyFileSystem4096893197/001/readonly/integrations/123456789.log + testing.go:1231: TempDir RemoveAll cleanup: unlinkat /tmp/TestReadOnlyFileSystem4096893197/001/readonly/integrations: directory not empty + This looks like an issue with the Launcher still writing data to the "read-only" directory (which isn't read only + as we run these tests in a container with root). + */ + flake.Mark(t) + readOnlyDir := filepath.Join(t.TempDir(), "readonly") err := os.Mkdir(readOnlyDir, 0444) - assert.Nil(t, err, "Unable to make tempdir readonly") + assert.NoError(t, err, "Unable to make tempdir readonly") pkgconfigsetup.Datadog().SetWithoutSource("logs_config.run_path", readOnlyDir) diff --git a/pkg/network/config/config.go b/pkg/network/config/config.go index 36c8557010e15..c6d57230a4003 100644 --- a/pkg/network/config/config.go +++ b/pkg/network/config/config.go @@ -7,7 +7,6 @@ package config import ( - "strings" "time" cebpf "github.com/cilium/ebpf" @@ -263,10 +262,6 @@ type Config struct { // TCPFailedConnectionsEnabled specifies whether the tracer will track & report TCP error codes TCPFailedConnectionsEnabled bool - // EnableHTTPStatsByStatusCode specifies if the HTTP stats should be aggregated by the actual status code - // instead of the status code family. - EnableHTTPStatsByStatusCode bool - // EnableNPMConnectionRollup enables aggregating connections by rolling up ephemeral ports EnableNPMConnectionRollup bool @@ -292,10 +287,6 @@ type Config struct { EnableUSMEventStream bool } -func join(pieces ...string) string { - return strings.Join(pieces, ".") -} - // New creates a config for the network tracer func New() *Config { cfg := pkgconfigsetup.SystemProbe() @@ -304,103 +295,102 @@ func New() *Config { c := &Config{ Config: *ebpf.NewConfig(), - NPMEnabled: cfg.GetBool(join(netNS, "enabled")), - ServiceMonitoringEnabled: cfg.GetBool(join(smNS, "enabled")), + NPMEnabled: cfg.GetBool(sysconfig.FullKeyPath(netNS, "enabled")), + ServiceMonitoringEnabled: cfg.GetBool(sysconfig.FullKeyPath(smNS, "enabled")), - CollectTCPv4Conns: cfg.GetBool(join(netNS, "collect_tcp_v4")), - CollectTCPv6Conns: cfg.GetBool(join(netNS, "collect_tcp_v6")), + CollectTCPv4Conns: cfg.GetBool(sysconfig.FullKeyPath(netNS, "collect_tcp_v4")), + CollectTCPv6Conns: cfg.GetBool(sysconfig.FullKeyPath(netNS, "collect_tcp_v6")), TCPConnTimeout: 2 * time.Minute, - CollectUDPv4Conns: cfg.GetBool(join(netNS, "collect_udp_v4")), - CollectUDPv6Conns: cfg.GetBool(join(netNS, "collect_udp_v6")), + CollectUDPv4Conns: cfg.GetBool(sysconfig.FullKeyPath(netNS, "collect_udp_v4")), + CollectUDPv6Conns: cfg.GetBool(sysconfig.FullKeyPath(netNS, "collect_udp_v6")), UDPConnTimeout: defaultUDPTimeoutSeconds * time.Second, UDPStreamTimeout: defaultUDPStreamTimeoutSeconds * time.Second, - OffsetGuessThreshold: uint64(cfg.GetInt64(join(spNS, "offset_guess_threshold"))), - ExcludedSourceConnections: cfg.GetStringMapStringSlice(join(spNS, "source_excludes")), - ExcludedDestinationConnections: cfg.GetStringMapStringSlice(join(spNS, "dest_excludes")), - - TCPFailedConnectionsEnabled: cfg.GetBool(join(netNS, "enable_tcp_failed_connections")), - MaxTrackedConnections: uint32(cfg.GetInt64(join(spNS, "max_tracked_connections"))), - MaxClosedConnectionsBuffered: uint32(cfg.GetInt64(join(spNS, "max_closed_connections_buffered"))), - MaxFailedConnectionsBuffered: 
uint32(cfg.GetInt64(join(netNS, "max_failed_connections_buffered"))), - ClosedConnectionFlushThreshold: cfg.GetInt(join(spNS, "closed_connection_flush_threshold")), - ClosedChannelSize: cfg.GetInt(join(spNS, "closed_channel_size")), - MaxConnectionsStateBuffered: cfg.GetInt(join(spNS, "max_connection_state_buffered")), + OffsetGuessThreshold: uint64(cfg.GetInt64(sysconfig.FullKeyPath(spNS, "offset_guess_threshold"))), + ExcludedSourceConnections: cfg.GetStringMapStringSlice(sysconfig.FullKeyPath(spNS, "source_excludes")), + ExcludedDestinationConnections: cfg.GetStringMapStringSlice(sysconfig.FullKeyPath(spNS, "dest_excludes")), + + TCPFailedConnectionsEnabled: cfg.GetBool(sysconfig.FullKeyPath(netNS, "enable_tcp_failed_connections")), + MaxTrackedConnections: uint32(cfg.GetInt64(sysconfig.FullKeyPath(spNS, "max_tracked_connections"))), + MaxClosedConnectionsBuffered: uint32(cfg.GetInt64(sysconfig.FullKeyPath(spNS, "max_closed_connections_buffered"))), + MaxFailedConnectionsBuffered: uint32(cfg.GetInt64(sysconfig.FullKeyPath(netNS, "max_failed_connections_buffered"))), + ClosedConnectionFlushThreshold: cfg.GetInt(sysconfig.FullKeyPath(spNS, "closed_connection_flush_threshold")), + ClosedChannelSize: cfg.GetInt(sysconfig.FullKeyPath(spNS, "closed_channel_size")), + MaxConnectionsStateBuffered: cfg.GetInt(sysconfig.FullKeyPath(spNS, "max_connection_state_buffered")), ClientStateExpiry: 2 * time.Minute, - DNSInspection: !cfg.GetBool(join(spNS, "disable_dns_inspection")), - CollectDNSStats: cfg.GetBool(join(spNS, "collect_dns_stats")), - CollectLocalDNS: cfg.GetBool(join(spNS, "collect_local_dns")), - CollectDNSDomains: cfg.GetBool(join(spNS, "collect_dns_domains")), - MaxDNSStats: cfg.GetInt(join(spNS, "max_dns_stats")), + DNSInspection: !cfg.GetBool(sysconfig.FullKeyPath(spNS, "disable_dns_inspection")), + CollectDNSStats: cfg.GetBool(sysconfig.FullKeyPath(spNS, "collect_dns_stats")), + CollectLocalDNS: cfg.GetBool(sysconfig.FullKeyPath(spNS, "collect_local_dns")), + CollectDNSDomains: cfg.GetBool(sysconfig.FullKeyPath(spNS, "collect_dns_domains")), + MaxDNSStats: cfg.GetInt(sysconfig.FullKeyPath(spNS, "max_dns_stats")), MaxDNSStatsBuffered: 75000, - DNSTimeout: time.Duration(cfg.GetInt(join(spNS, "dns_timeout_in_s"))) * time.Second, - - ProtocolClassificationEnabled: cfg.GetBool(join(netNS, "enable_protocol_classification")), - - NPMRingbuffersEnabled: cfg.GetBool(join(netNS, "enable_ringbuffers")), - - EnableHTTPMonitoring: cfg.GetBool(join(smNS, "enable_http_monitoring")), - EnableHTTP2Monitoring: cfg.GetBool(join(smNS, "enable_http2_monitoring")), - EnableKafkaMonitoring: cfg.GetBool(join(smNS, "enable_kafka_monitoring")), - EnablePostgresMonitoring: cfg.GetBool(join(smNS, "enable_postgres_monitoring")), - EnableRedisMonitoring: cfg.GetBool(join(smNS, "enable_redis_monitoring")), - EnableNativeTLSMonitoring: cfg.GetBool(join(smNS, "tls", "native", "enabled")), - EnableIstioMonitoring: cfg.GetBool(join(smNS, "tls", "istio", "enabled")), - EnvoyPath: cfg.GetString(join(smNS, "tls", "istio", "envoy_path")), - EnableNodeJSMonitoring: cfg.GetBool(join(smNS, "tls", "nodejs", "enabled")), - MaxUSMConcurrentRequests: uint32(cfg.GetInt(join(smNS, "max_concurrent_requests"))), - MaxHTTPStatsBuffered: cfg.GetInt(join(smNS, "max_http_stats_buffered")), - MaxKafkaStatsBuffered: cfg.GetInt(join(smNS, "max_kafka_stats_buffered")), - MaxPostgresStatsBuffered: cfg.GetInt(join(smNS, "max_postgres_stats_buffered")), - MaxPostgresTelemetryBuffer: cfg.GetInt(join(smNS, "max_postgres_telemetry_buffer")), - 
MaxRedisStatsBuffered: cfg.GetInt(join(smNS, "max_redis_stats_buffered")), - - MaxTrackedHTTPConnections: cfg.GetInt64(join(smNS, "max_tracked_http_connections")), - HTTPNotificationThreshold: cfg.GetInt64(join(smNS, "http_notification_threshold")), - HTTPMaxRequestFragment: cfg.GetInt64(join(smNS, "http_max_request_fragment")), - - EnableConntrack: cfg.GetBool(join(spNS, "enable_conntrack")), - ConntrackMaxStateSize: cfg.GetInt(join(spNS, "conntrack_max_state_size")), - ConntrackRateLimit: cfg.GetInt(join(spNS, "conntrack_rate_limit")), + DNSTimeout: time.Duration(cfg.GetInt(sysconfig.FullKeyPath(spNS, "dns_timeout_in_s"))) * time.Second, + + ProtocolClassificationEnabled: cfg.GetBool(sysconfig.FullKeyPath(netNS, "enable_protocol_classification")), + + NPMRingbuffersEnabled: cfg.GetBool(sysconfig.FullKeyPath(netNS, "enable_ringbuffers")), + + EnableHTTPMonitoring: cfg.GetBool(sysconfig.FullKeyPath(smNS, "enable_http_monitoring")), + EnableHTTP2Monitoring: cfg.GetBool(sysconfig.FullKeyPath(smNS, "enable_http2_monitoring")), + EnableKafkaMonitoring: cfg.GetBool(sysconfig.FullKeyPath(smNS, "enable_kafka_monitoring")), + EnablePostgresMonitoring: cfg.GetBool(sysconfig.FullKeyPath(smNS, "enable_postgres_monitoring")), + EnableRedisMonitoring: cfg.GetBool(sysconfig.FullKeyPath(smNS, "enable_redis_monitoring")), + EnableNativeTLSMonitoring: cfg.GetBool(sysconfig.FullKeyPath(smNS, "tls", "native", "enabled")), + EnableIstioMonitoring: cfg.GetBool(sysconfig.FullKeyPath(smNS, "tls", "istio", "enabled")), + EnvoyPath: cfg.GetString(sysconfig.FullKeyPath(smNS, "tls", "istio", "envoy_path")), + EnableNodeJSMonitoring: cfg.GetBool(sysconfig.FullKeyPath(smNS, "tls", "nodejs", "enabled")), + MaxUSMConcurrentRequests: uint32(cfg.GetInt(sysconfig.FullKeyPath(smNS, "max_concurrent_requests"))), + MaxHTTPStatsBuffered: cfg.GetInt(sysconfig.FullKeyPath(smNS, "max_http_stats_buffered")), + MaxKafkaStatsBuffered: cfg.GetInt(sysconfig.FullKeyPath(smNS, "max_kafka_stats_buffered")), + MaxPostgresStatsBuffered: cfg.GetInt(sysconfig.FullKeyPath(smNS, "max_postgres_stats_buffered")), + MaxPostgresTelemetryBuffer: cfg.GetInt(sysconfig.FullKeyPath(smNS, "max_postgres_telemetry_buffer")), + MaxRedisStatsBuffered: cfg.GetInt(sysconfig.FullKeyPath(smNS, "max_redis_stats_buffered")), + + MaxTrackedHTTPConnections: cfg.GetInt64(sysconfig.FullKeyPath(smNS, "max_tracked_http_connections")), + HTTPNotificationThreshold: cfg.GetInt64(sysconfig.FullKeyPath(smNS, "http_notification_threshold")), + HTTPMaxRequestFragment: cfg.GetInt64(sysconfig.FullKeyPath(smNS, "http_max_request_fragment")), + + EnableConntrack: cfg.GetBool(sysconfig.FullKeyPath(spNS, "enable_conntrack")), + ConntrackMaxStateSize: cfg.GetInt(sysconfig.FullKeyPath(spNS, "conntrack_max_state_size")), + ConntrackRateLimit: cfg.GetInt(sysconfig.FullKeyPath(spNS, "conntrack_rate_limit")), ConntrackRateLimitInterval: 3 * time.Second, - EnableConntrackAllNamespaces: cfg.GetBool(join(spNS, "enable_conntrack_all_namespaces")), - IgnoreConntrackInitFailure: cfg.GetBool(join(netNS, "ignore_conntrack_init_failure")), - ConntrackInitTimeout: cfg.GetDuration(join(netNS, "conntrack_init_timeout")), - EnableEbpfConntracker: cfg.GetBool(join(netNS, "enable_ebpf_conntracker")), + EnableConntrackAllNamespaces: cfg.GetBool(sysconfig.FullKeyPath(spNS, "enable_conntrack_all_namespaces")), + IgnoreConntrackInitFailure: cfg.GetBool(sysconfig.FullKeyPath(netNS, "ignore_conntrack_init_failure")), + ConntrackInitTimeout: cfg.GetDuration(sysconfig.FullKeyPath(netNS, 
"conntrack_init_timeout")), + EnableEbpfConntracker: cfg.GetBool(sysconfig.FullKeyPath(netNS, "enable_ebpf_conntracker")), - EnableGatewayLookup: cfg.GetBool(join(netNS, "enable_gateway_lookup")), + EnableGatewayLookup: cfg.GetBool(sysconfig.FullKeyPath(netNS, "enable_gateway_lookup")), - EnableMonotonicCount: cfg.GetBool(join(spNS, "windows.enable_monotonic_count")), + EnableMonotonicCount: cfg.GetBool(sysconfig.FullKeyPath(spNS, "windows.enable_monotonic_count")), - RecordedQueryTypes: cfg.GetStringSlice(join(netNS, "dns_recorded_query_types")), + RecordedQueryTypes: cfg.GetStringSlice(sysconfig.FullKeyPath(netNS, "dns_recorded_query_types")), - EnableProcessEventMonitoring: cfg.GetBool(join(evNS, "network_process", "enabled")), - MaxProcessesTracked: cfg.GetInt(join(evNS, "network_process", "max_processes_tracked")), + EnableProcessEventMonitoring: cfg.GetBool(sysconfig.FullKeyPath(evNS, "network_process", "enabled")), + MaxProcessesTracked: cfg.GetInt(sysconfig.FullKeyPath(evNS, "network_process", "max_processes_tracked")), - EnableRootNetNs: cfg.GetBool(join(netNS, "enable_root_netns")), + EnableRootNetNs: cfg.GetBool(sysconfig.FullKeyPath(netNS, "enable_root_netns")), - HTTP2DynamicTableMapCleanerInterval: time.Duration(cfg.GetInt(join(smNS, "http2_dynamic_table_map_cleaner_interval_seconds"))) * time.Second, + HTTP2DynamicTableMapCleanerInterval: time.Duration(cfg.GetInt(sysconfig.FullKeyPath(smNS, "http2_dynamic_table_map_cleaner_interval_seconds"))) * time.Second, - HTTPMapCleanerInterval: time.Duration(cfg.GetInt(join(smNS, "http_map_cleaner_interval_in_s"))) * time.Second, - HTTPIdleConnectionTTL: time.Duration(cfg.GetInt(join(smNS, "http_idle_connection_ttl_in_s"))) * time.Second, + HTTPMapCleanerInterval: time.Duration(cfg.GetInt(sysconfig.FullKeyPath(smNS, "http_map_cleaner_interval_in_s"))) * time.Second, + HTTPIdleConnectionTTL: time.Duration(cfg.GetInt(sysconfig.FullKeyPath(smNS, "http_idle_connection_ttl_in_s"))) * time.Second, - EnableNPMConnectionRollup: cfg.GetBool(join(netNS, "enable_connection_rollup")), + EnableNPMConnectionRollup: cfg.GetBool(sysconfig.FullKeyPath(netNS, "enable_connection_rollup")), - EnableEbpfless: cfg.GetBool(join(netNS, "enable_ebpfless")), + EnableEbpfless: cfg.GetBool(sysconfig.FullKeyPath(netNS, "enable_ebpfless")), // Service Monitoring - EnableGoTLSSupport: cfg.GetBool(join(smNS, "tls", "go", "enabled")), - GoTLSExcludeSelf: cfg.GetBool(join(smNS, "tls", "go", "exclude_self")), - EnableHTTPStatsByStatusCode: cfg.GetBool(join(smNS, "enable_http_stats_by_status_code")), - EnableUSMQuantization: cfg.GetBool(join(smNS, "enable_quantization")), - EnableUSMConnectionRollup: cfg.GetBool(join(smNS, "enable_connection_rollup")), - EnableUSMRingBuffers: cfg.GetBool(join(smNS, "enable_ring_buffers")), - EnableUSMEventStream: cfg.GetBool(join(smNS, "enable_event_stream")), + EnableGoTLSSupport: cfg.GetBool(sysconfig.FullKeyPath(smNS, "tls", "go", "enabled")), + GoTLSExcludeSelf: cfg.GetBool(sysconfig.FullKeyPath(smNS, "tls", "go", "exclude_self")), + EnableUSMQuantization: cfg.GetBool(sysconfig.FullKeyPath(smNS, "enable_quantization")), + EnableUSMConnectionRollup: cfg.GetBool(sysconfig.FullKeyPath(smNS, "enable_connection_rollup")), + EnableUSMRingBuffers: cfg.GetBool(sysconfig.FullKeyPath(smNS, "enable_ring_buffers")), + EnableUSMEventStream: cfg.GetBool(sysconfig.FullKeyPath(smNS, "enable_event_stream")), } - httpRRKey := join(smNS, "http_replace_rules") + httpRRKey := sysconfig.FullKeyPath(smNS, "http_replace_rules") rr, err := 
parseReplaceRules(cfg, httpRRKey) if err != nil { log.Errorf("error parsing %q: %v", httpRRKey, err) diff --git a/pkg/network/config/config_test.go b/pkg/network/config/config_test.go index 50c90c3947b1d..4873d9b1baeb8 100644 --- a/pkg/network/config/config_test.go +++ b/pkg/network/config/config_test.go @@ -74,26 +74,6 @@ func TestDisablingProtocolClassification(t *testing.T) { }) } -func TestEnableHTTPStatsByStatusCode(t *testing.T) { - t.Run("via YAML", func(t *testing.T) { - mockSystemProbe := mock.NewSystemProbe(t) - mockSystemProbe.SetWithoutSource("service_monitoring_config.enable_http_stats_by_status_code", true) - cfg := New() - - assert.True(t, cfg.EnableHTTPStatsByStatusCode) - }) - - t.Run("via ENV variable", func(t *testing.T) { - mock.NewSystemProbe(t) - t.Setenv("DD_SERVICE_MONITORING_CONFIG_ENABLE_HTTP_STATS_BY_STATUS_CODE", "true") - cfg := New() - _, err := sysconfig.New("", "") - require.NoError(t, err) - - assert.True(t, cfg.EnableHTTPStatsByStatusCode) - }) -} - func TestEnableHTTPMonitoring(t *testing.T) { t.Run("via deprecated YAML", func(t *testing.T) { mockSystemProbe := mock.NewSystemProbe(t) diff --git a/pkg/network/encoding/encoding_test.go b/pkg/network/encoding/encoding_test.go index ed50111ee7c66..88343a419cb30 100644 --- a/pkg/network/encoding/encoding_test.go +++ b/pkg/network/encoding/encoding_test.go @@ -187,16 +187,7 @@ func getExpectedConnections(encodedWithQueryType bool, httpOutBlob []byte) *mode } func TestSerialization(t *testing.T) { - t.Run("status code", func(t *testing.T) { - testSerialization(t, true) - }) - t.Run("status class", func(t *testing.T) { - testSerialization(t, false) - }) -} - -func testSerialization(t *testing.T, aggregateByStatusCode bool) { - httpReqStats := http.NewRequestStats(aggregateByStatusCode) + httpReqStats := http.NewRequestStats() in := &network.Connections{ BufferedData: network.BufferedData{ Conns: []network.ConnectionStats{ @@ -490,22 +481,13 @@ func testSerialization(t *testing.T, aggregateByStatusCode bool) { } func TestHTTPSerializationWithLocalhostTraffic(t *testing.T) { - t.Run("status code", func(t *testing.T) { - testHTTPSerializationWithLocalhostTraffic(t, true) - }) - t.Run("status class", func(t *testing.T) { - testHTTPSerializationWithLocalhostTraffic(t, false) - }) -} - -func testHTTPSerializationWithLocalhostTraffic(t *testing.T, aggregateByStatusCode bool) { var ( clientPort = uint16(52800) serverPort = uint16(8080) localhost = util.AddressFromString("127.0.0.1") ) - httpReqStats := http.NewRequestStats(aggregateByStatusCode) + httpReqStats := http.NewRequestStats() in := &network.Connections{ BufferedData: network.BufferedData{ Conns: []network.ConnectionStats{ @@ -659,23 +641,13 @@ func assertConnsEqualHTTP2(t *testing.T, expected, actual *model.Connections) { } func TestHTTP2SerializationWithLocalhostTraffic(t *testing.T) { - t.Run("status code", func(t *testing.T) { - testHTTP2SerializationWithLocalhostTraffic(t, true) - }) - t.Run("status class", func(t *testing.T) { - testHTTP2SerializationWithLocalhostTraffic(t, false) - }) - -} - -func testHTTP2SerializationWithLocalhostTraffic(t *testing.T, aggregateByStatusCode bool) { var ( clientPort = uint16(52800) serverPort = uint16(8080) localhost = util.AddressFromString("127.0.0.1") ) - http2ReqStats := http.NewRequestStats(aggregateByStatusCode) + http2ReqStats := http.NewRequestStats() in := &network.Connections{ BufferedData: network.BufferedData{ Conns: []network.ConnectionStats{ diff --git a/pkg/network/encoding/marshal/usm_http2_test.go 
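The `config.go` hunks above replace the file-local `join` helper with `sysconfig.FullKeyPath`. Since the removed `join` was just `strings.Join(pieces, ".")`, the shared helper presumably does the equivalent; a sketch under that assumption, with the signature inferred only from the call sites (variadic segments in, dotted key out):

```go
package sysconfig

import "strings"

// FullKeyPath joins config namespace segments into a dotted key, e.g.
// FullKeyPath("network_config", "enabled") -> "network_config.enabled".
// Reconstructed from the removed join() helper; the real implementation
// lives in the agent's system-probe config package.
func FullKeyPath(pieces ...string) string {
	return strings.Join(pieces, ".")
}
```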
b/pkg/network/encoding/marshal/usm_http2_test.go index 852e2ba81181d..0a658e0066b3c 100644 --- a/pkg/network/encoding/marshal/usm_http2_test.go +++ b/pkg/network/encoding/marshal/usm_http2_test.go @@ -43,15 +43,7 @@ func TestHTTP2Stats(t *testing.T) { func (s *HTTP2Suite) TestFormatHTTP2Stats() { t := s.T() - t.Run("status code", func(t *testing.T) { - testFormatHTTP2Stats(t, true) - }) - t.Run("status class", func(t *testing.T) { - testFormatHTTP2Stats(t, false) - }) -} -func testFormatHTTP2Stats(t *testing.T, aggregateByStatusCode bool) { var ( clientPort = uint16(52800) serverPort = uint16(8080) @@ -68,7 +60,7 @@ func testFormatHTTP2Stats(t *testing.T, aggregateByStatusCode bool) { true, http.MethodGet, ) - http2Stats1 := http.NewRequestStats(aggregateByStatusCode) + http2Stats1 := http.NewRequestStats() for _, i := range statusCodes { http2Stats1.AddRequest(i, 10, 1<<(i/100-1), nil) } @@ -78,7 +70,7 @@ func testFormatHTTP2Stats(t *testing.T, aggregateByStatusCode bool) { Content: http.Interner.GetString("/testpath-2"), FullPath: true, } - http2Stats2 := http.NewRequestStats(aggregateByStatusCode) + http2Stats2 := http.NewRequestStats() for _, i := range statusCodes { http2Stats2.AddRequest(i, 20, 1<<(i/100-1), nil) } @@ -117,9 +109,8 @@ func testFormatHTTP2Stats(t *testing.T, aggregateByStatusCode bool) { } for _, statusCode := range statusCodes { - code := int32(http2Stats1.NormalizeStatusCode(statusCode)) - out.EndpointAggregations[0].StatsByStatusCode[code] = &model.HTTPStats_Data{Count: 1, FirstLatencySample: 10, Latencies: nil} - out.EndpointAggregations[1].StatsByStatusCode[code] = &model.HTTPStats_Data{Count: 1, FirstLatencySample: 20, Latencies: nil} + out.EndpointAggregations[0].StatsByStatusCode[int32(statusCode)] = &model.HTTPStats_Data{Count: 1, FirstLatencySample: 10, Latencies: nil} + out.EndpointAggregations[1].StatsByStatusCode[int32(statusCode)] = &model.HTTPStats_Data{Count: 1, FirstLatencySample: 20, Latencies: nil} } http2Encoder := newHTTP2Encoder(in.HTTP2) @@ -133,16 +124,8 @@ func testFormatHTTP2Stats(t *testing.T, aggregateByStatusCode bool) { func (s *HTTP2Suite) TestFormatHTTP2StatsByPath() { t := s.T() - t.Run("status code", func(t *testing.T) { - testFormatHTTP2StatsByPath(t, true) - }) - t.Run("status class", func(t *testing.T) { - testFormatHTTP2StatsByPath(t, false) - }) -} -func testFormatHTTP2StatsByPath(t *testing.T, aggregateByStatusCode bool) { - http2ReqStats := http.NewRequestStats(aggregateByStatusCode) + http2ReqStats := http.NewRequestStats() http2ReqStats.AddRequest(100, 12.5, 0, nil) http2ReqStats.AddRequest(100, 12.5, tagGnuTLS, nil) @@ -151,11 +134,11 @@ func testFormatHTTP2StatsByPath(t *testing.T, aggregateByStatusCode bool) { // Verify the latency data is correct prior to serialization - latencies := http2ReqStats.Data[http2ReqStats.NormalizeStatusCode(100)].Latencies + latencies := http2ReqStats.Data[100].Latencies assert.Equal(t, 2.0, latencies.GetCount()) verifyQuantile(t, latencies, 0.5, 12.5) - latencies = http2ReqStats.Data[http2ReqStats.NormalizeStatusCode(405)].Latencies + latencies = http2ReqStats.Data[405].Latencies assert.Equal(t, 2.0, latencies.GetCount()) verifyQuantile(t, latencies, 0.5, 3.5) @@ -199,12 +182,12 @@ func testFormatHTTP2StatsByPath(t *testing.T, aggregateByStatusCode bool) { statsByResponseStatus := endpointAggregations[0].StatsByStatusCode assert.Len(t, statsByResponseStatus, 2) - serializedLatencies := statsByResponseStatus[int32(http2ReqStats.NormalizeStatusCode(100))].Latencies + serializedLatencies := 
statsByResponseStatus[int32(100)].Latencies sketch := unmarshalSketch(t, serializedLatencies) assert.Equal(t, 2.0, sketch.GetCount()) verifyQuantile(t, sketch, 0.5, 12.5) - serializedLatencies = statsByResponseStatus[int32(http2ReqStats.NormalizeStatusCode(405))].Latencies + serializedLatencies = statsByResponseStatus[int32(405)].Latencies sketch = unmarshalSketch(t, serializedLatencies) assert.Equal(t, 2.0, sketch.GetCount()) verifyQuantile(t, sketch, 0.5, 3.5) @@ -215,16 +198,8 @@ func testFormatHTTP2StatsByPath(t *testing.T, aggregateByStatusCode bool) { func (s *HTTP2Suite) TestHTTP2IDCollisionRegression() { t := s.T() - t.Run("status code", func(t *testing.T) { - testHTTP2IDCollisionRegression(t, true) - }) - t.Run("status class", func(t *testing.T) { - testHTTP2IDCollisionRegression(t, false) - }) -} -func testHTTP2IDCollisionRegression(t *testing.T, aggregateByStatusCode bool) { - http2Stats := http.NewRequestStats(aggregateByStatusCode) + http2Stats := http.NewRequestStats() assert := assert.New(t) connections := []network.ConnectionStats{ { @@ -269,7 +244,7 @@ func testHTTP2IDCollisionRegression(t *testing.T, aggregateByStatusCode bool) { // back a non-nil result aggregations, _, _ := getHTTP2Aggregations(t, http2Encoder, connections[0]) assert.Equal("/", aggregations.EndpointAggregations[0].Path) - assert.Equal(uint32(1), aggregations.EndpointAggregations[0].StatsByStatusCode[int32(http2Stats.NormalizeStatusCode(104))].Count) + assert.Equal(uint32(1), aggregations.EndpointAggregations[0].StatsByStatusCode[int32(104)].Count) // assert that the other connections sharing the same (source,destination) // addresses but different PIDs *won't* be associated with the HTTP2 stats @@ -284,15 +259,7 @@ func testHTTP2IDCollisionRegression(t *testing.T, aggregateByStatusCode bool) { func (s *HTTP2Suite) TestHTTP2LocalhostScenario() { t := s.T() - t.Run("status code", func(t *testing.T) { - testHTTP2LocalhostScenario(t, true) - }) - t.Run("status class", func(t *testing.T) { - testHTTP2LocalhostScenario(t, false) - }) -} -func testHTTP2LocalhostScenario(t *testing.T, aggregateByStatusCode bool) { assert := assert.New(t) cliport := uint16(6000) serverport := uint16(80) @@ -313,7 +280,7 @@ func testHTTP2LocalhostScenario(t *testing.T, aggregateByStatusCode bool) { }, } - http2Stats := http.NewRequestStats(aggregateByStatusCode) + http2Stats := http.NewRequestStats() httpKey := http.NewKey( util.AddressFromString("127.0.0.1"), util.AddressFromString("127.0.0.1"), @@ -358,11 +325,11 @@ func testHTTP2LocalhostScenario(t *testing.T, aggregateByStatusCode bool) { // will have HTTP2 stats aggregations, _, _ := getHTTP2Aggregations(t, http2Encoder, in.Conns[0]) assert.Equal("/", aggregations.EndpointAggregations[0].Path) - assert.Equal(uint32(1), aggregations.EndpointAggregations[0].StatsByStatusCode[int32(http2Stats.NormalizeStatusCode(103))].Count) + assert.Equal(uint32(1), aggregations.EndpointAggregations[0].StatsByStatusCode[int32(103)].Count) aggregations, _, _ = getHTTP2Aggregations(t, http2Encoder, in.Conns[1]) assert.Equal("/", aggregations.EndpointAggregations[0].Path) - assert.Equal(uint32(1), aggregations.EndpointAggregations[0].StatsByStatusCode[int32(http2Stats.NormalizeStatusCode(103))].Count) + assert.Equal(uint32(1), aggregations.EndpointAggregations[0].StatsByStatusCode[int32(103)].Count) } func getHTTP2Aggregations(t *testing.T, encoder *http2Encoder, c network.ConnectionStats) (*model.HTTP2Aggregations, uint64, map[string]struct{}) { diff --git 
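For readers tracking why every `NormalizeStatusCode(...)` call in these test hunks collapses to a literal status code: the PR removes the status-class aggregation mode, under which codes were folded into their family. The deleted logic (visible in full in the `pkg/network/protocols/http/stats.go` hunk further down) amounted to:

```go
// Reconstruction of the removed normalization: with exact-status-code
// aggregation disabled, a code was collapsed to its family boundary.
func normalizeStatusCode(status uint16, aggregateByStatusCode bool) uint16 {
	if aggregateByStatusCode {
		return status
	}
	return (status / 100) * 100 // e.g. 405 -> 400, 204 -> 200
}
```

With `NewRequestStats()` no longer taking the flag, `Data` is always keyed by the exact code, which is why the expectations become `Data[405]`, `StatsByStatusCode[int32(104)]`, and so on.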
a/pkg/network/encoding/marshal/usm_http_test.go b/pkg/network/encoding/marshal/usm_http_test.go index c03737f80091a..b96a22c7fbf32 100644 --- a/pkg/network/encoding/marshal/usm_http_test.go +++ b/pkg/network/encoding/marshal/usm_http_test.go @@ -23,15 +23,6 @@ import ( ) func TestFormatHTTPStats(t *testing.T) { - t.Run("status code", func(t *testing.T) { - testFormatHTTPStats(t, true) - }) - t.Run("status class", func(t *testing.T) { - testFormatHTTPStats(t, false) - }) -} - -func testFormatHTTPStats(t *testing.T, aggregateByStatusCode bool) { var ( clientPort = uint16(52800) serverPort = uint16(8080) @@ -48,7 +39,7 @@ func testFormatHTTPStats(t *testing.T, aggregateByStatusCode bool) { true, http.MethodGet, ) - httpStats1 := http.NewRequestStats(aggregateByStatusCode) + httpStats1 := http.NewRequestStats() for _, i := range statusCodes { httpStats1.AddRequest(i, 10, 1<<(i/100-1), nil) } @@ -58,7 +49,7 @@ func testFormatHTTPStats(t *testing.T, aggregateByStatusCode bool) { Content: http.Interner.GetString("/testpath-2"), FullPath: true, } - httpStats2 := http.NewRequestStats(aggregateByStatusCode) + httpStats2 := http.NewRequestStats() for _, i := range statusCodes { httpStats2.AddRequest(i, 20, 1<<(i/100-1), nil) } @@ -97,9 +88,8 @@ func testFormatHTTPStats(t *testing.T, aggregateByStatusCode bool) { } for _, statusCode := range statusCodes { - code := int32(httpStats1.NormalizeStatusCode(statusCode)) - out.EndpointAggregations[0].StatsByStatusCode[code] = &model.HTTPStats_Data{Count: 1, FirstLatencySample: 10, Latencies: nil} - out.EndpointAggregations[1].StatsByStatusCode[code] = &model.HTTPStats_Data{Count: 1, FirstLatencySample: 20, Latencies: nil} + out.EndpointAggregations[0].StatsByStatusCode[int32(statusCode)] = &model.HTTPStats_Data{Count: 1, FirstLatencySample: 10, Latencies: nil} + out.EndpointAggregations[1].StatsByStatusCode[int32(statusCode)] = &model.HTTPStats_Data{Count: 1, FirstLatencySample: 20, Latencies: nil} } httpEncoder := newHTTPEncoder(in.HTTP) @@ -115,16 +105,7 @@ func testFormatHTTPStats(t *testing.T, aggregateByStatusCode bool) { } func TestFormatHTTPStatsByPath(t *testing.T) { - t.Run("status code", func(t *testing.T) { - testFormatHTTPStatsByPath(t, true) - }) - t.Run("status class", func(t *testing.T) { - testFormatHTTPStatsByPath(t, false) - }) -} - -func testFormatHTTPStatsByPath(t *testing.T, aggregateByStatusCode bool) { - httpReqStats := http.NewRequestStats(aggregateByStatusCode) + httpReqStats := http.NewRequestStats() httpReqStats.AddRequest(100, 12.5, 0, nil) httpReqStats.AddRequest(100, 12.5, tagGnuTLS, nil) @@ -133,11 +114,11 @@ func testFormatHTTPStatsByPath(t *testing.T, aggregateByStatusCode bool) { // Verify the latency data is correct prior to serialization - latencies := httpReqStats.Data[httpReqStats.NormalizeStatusCode(100)].Latencies + latencies := httpReqStats.Data[100].Latencies assert.Equal(t, 2.0, latencies.GetCount()) verifyQuantile(t, latencies, 0.5, 12.5) - latencies = httpReqStats.Data[httpReqStats.NormalizeStatusCode(405)].Latencies + latencies = httpReqStats.Data[405].Latencies assert.Equal(t, 2.0, latencies.GetCount()) verifyQuantile(t, latencies, 0.5, 3.5) @@ -181,12 +162,12 @@ func testFormatHTTPStatsByPath(t *testing.T, aggregateByStatusCode bool) { statsByResponseStatus := endpointAggregations[0].StatsByStatusCode assert.Len(t, statsByResponseStatus, 2) - serializedLatencies := statsByResponseStatus[int32(httpReqStats.NormalizeStatusCode(100))].Latencies + serializedLatencies := statsByResponseStatus[int32(100)].Latencies 
sketch := unmarshalSketch(t, serializedLatencies) assert.Equal(t, 2.0, sketch.GetCount()) verifyQuantile(t, sketch, 0.5, 12.5) - serializedLatencies = statsByResponseStatus[int32(httpReqStats.NormalizeStatusCode(405))].Latencies + serializedLatencies = statsByResponseStatus[int32(405)].Latencies sketch = unmarshalSketch(t, serializedLatencies) assert.Equal(t, 2.0, sketch.GetCount()) verifyQuantile(t, sketch, 0.5, 3.5) @@ -196,16 +177,7 @@ func testFormatHTTPStatsByPath(t *testing.T, aggregateByStatusCode bool) { } func TestIDCollisionRegression(t *testing.T) { - t.Run("status code", func(t *testing.T) { - testIDCollisionRegression(t, true) - }) - t.Run("status class", func(t *testing.T) { - testIDCollisionRegression(t, false) - }) -} - -func testIDCollisionRegression(t *testing.T, aggregateByStatusCode bool) { - httpStats := http.NewRequestStats(aggregateByStatusCode) + httpStats := http.NewRequestStats() assert := assert.New(t) connections := []network.ConnectionStats{ { @@ -250,7 +222,7 @@ func testIDCollisionRegression(t *testing.T, aggregateByStatusCode bool) { // back a non-nil result aggregations, _, _ := getHTTPAggregations(t, httpEncoder, in.Conns[0]) assert.Equal("/", aggregations.EndpointAggregations[0].Path) - assert.Equal(uint32(1), aggregations.EndpointAggregations[0].StatsByStatusCode[int32(httpStats.NormalizeStatusCode(104))].Count) + assert.Equal(uint32(1), aggregations.EndpointAggregations[0].StatsByStatusCode[int32(104)].Count) // assert that the other connections sharing the same (source,destination) // addresses but different PIDs *won't* be associated with the HTTP stats @@ -264,15 +236,6 @@ func testIDCollisionRegression(t *testing.T, aggregateByStatusCode bool) { } func TestLocalhostScenario(t *testing.T) { - t.Run("status code", func(t *testing.T) { - testLocalhostScenario(t, true) - }) - t.Run("status class", func(t *testing.T) { - testLocalhostScenario(t, false) - }) -} - -func testLocalhostScenario(t *testing.T, aggregateByStatusCode bool) { assert := assert.New(t) connections := []network.ConnectionStats{ { @@ -291,7 +254,7 @@ func testLocalhostScenario(t *testing.T, aggregateByStatusCode bool) { }, } - httpStats := http.NewRequestStats(aggregateByStatusCode) + httpStats := http.NewRequestStats() httpKey := http.NewKey( util.AddressFromString("127.0.0.1"), util.AddressFromString("127.0.0.1"), @@ -337,11 +300,11 @@ func testLocalhostScenario(t *testing.T, aggregateByStatusCode bool) { // will have HTTP stats aggregations, _, _ := getHTTPAggregations(t, httpEncoder, in.Conns[0]) assert.Equal("/", aggregations.EndpointAggregations[0].Path) - assert.Equal(uint32(1), aggregations.EndpointAggregations[0].StatsByStatusCode[int32(httpStats.NormalizeStatusCode(103))].Count) + assert.Equal(uint32(1), aggregations.EndpointAggregations[0].StatsByStatusCode[int32(103)].Count) aggregations, _, _ = getHTTPAggregations(t, httpEncoder, in.Conns[1]) assert.Equal("/", aggregations.EndpointAggregations[0].Path) - assert.Equal(uint32(1), aggregations.EndpointAggregations[0].StatsByStatusCode[int32(httpStats.NormalizeStatusCode(103))].Count) + assert.Equal(uint32(1), aggregations.EndpointAggregations[0].StatsByStatusCode[int32(103)].Count) } func getHTTPAggregations(t *testing.T, encoder *httpEncoder, c network.ConnectionStats) (*model.HTTPAggregations, uint64, map[string]struct{}) { @@ -388,7 +351,7 @@ func generateBenchMarkPayload(sourcePortsMax, destPortsMax uint16) network.Conne HTTP: make(map[http.Key]*http.RequestStats), } - httpStats := http.NewRequestStats(false) + httpStats 
:= http.NewRequestStats() httpStats.AddRequest(100, 10, 0, nil) httpStats.AddRequest(200, 10, 0, nil) httpStats.AddRequest(300, 10, 0, nil) diff --git a/pkg/network/encoding/marshal/usm_test.go b/pkg/network/encoding/marshal/usm_test.go index 75884cf36341f..ba4b921fd748f 100644 --- a/pkg/network/encoding/marshal/usm_test.go +++ b/pkg/network/encoding/marshal/usm_test.go @@ -27,7 +27,7 @@ func TestGroupByConnection(t *testing.T) { true, http.MethodGet, ) - val1 := http.NewRequestStats(false) + val1 := http.NewRequestStats() val1.AddRequest(100, 10.0, 0, nil) key2 := http.NewKey( @@ -39,7 +39,7 @@ func TestGroupByConnection(t *testing.T) { true, http.MethodGet, ) - val2 := http.NewRequestStats(false) + val2 := http.NewRequestStats() val2.AddRequest(200, 10.0, 0, nil) // Connection 2 @@ -52,7 +52,7 @@ func TestGroupByConnection(t *testing.T) { true, http.MethodGet, ) - val3 := http.NewRequestStats(false) + val3 := http.NewRequestStats() val3.AddRequest(300, 10.0, 0, nil) key4 := http.NewKey( @@ -64,7 +64,7 @@ func TestGroupByConnection(t *testing.T) { true, http.MethodGet, ) - val4 := http.NewRequestStats(false) + val4 := http.NewRequestStats() val4.AddRequest(400, 10.0, 0, nil) data := map[http.Key]*http.RequestStats{ diff --git a/pkg/network/filter/packet_source_linux.go b/pkg/network/filter/packet_source_linux.go index 65fbbe9f9270f..d0d493f82987a 100644 --- a/pkg/network/filter/packet_source_linux.go +++ b/pkg/network/filter/packet_source_linux.go @@ -155,7 +155,14 @@ func (p *AFPacketSource) VisitPackets(exit <-chan struct{}, visit func(data []by return err } - pktInfo.PktType = stats.AncillaryData[0].(afpacket.AncillaryPktType).Type + for _, data := range stats.AncillaryData { + // if addPktType = true, AncillaryData will contain an AncillaryPktType element; + // however, it might not be the first element, so scan through. + pktType, ok := data.(afpacket.AncillaryPktType) + if ok { + pktInfo.PktType = pktType.Type + } + } if err := visit(data, pktInfo, stats.Timestamp); err != nil { return err } diff --git a/pkg/network/protocols/events/consumer.go b/pkg/network/protocols/events/consumer.go index 29d7d2350737f..71927e65eddf5 100644 --- a/pkg/network/protocols/events/consumer.go +++ b/pkg/network/protocols/events/consumer.go @@ -13,6 +13,8 @@ import ( "sync" "unsafe" + "github.com/cihub/seelog" + manager "github.com/DataDog/ebpf-manager" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" @@ -162,7 +164,9 @@ func (c *Consumer[V]) Start() { c.batchReader.ReadAll(func(_ int, b *batch) { c.process(b, true) }) - log.Debugf("usm events summary: name=%q %s", c.proto, c.metricGroup.Summary()) + if log.ShouldLog(seelog.DebugLvl) { + log.Debugf("usm events summary: name=%q %s", c.proto, c.metricGroup.Summary()) + } close(done) } } diff --git a/pkg/network/protocols/http/statkeeper.go b/pkg/network/protocols/http/statkeeper.go index 304c37699ea4b..6cb428c4722bf 100644 --- a/pkg/network/protocols/http/statkeeper.go +++ b/pkg/network/protocols/http/statkeeper.go @@ -19,14 +19,13 @@ import ( // StatKeeper is responsible for aggregating HTTP stats. 
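The `packet_source_linux.go` hunk above fixes a brittle lookup: `stats.AncillaryData[0].(afpacket.AncillaryPktType)` assumed the packet-type record always came first, and a single-value type assertion panics on a mismatch. The fix scans the heterogeneous slice for the concrete type. A standalone analogue with hypothetical stand-in types:

```go
package main

import "fmt"

// Stand-ins for the heterogeneous ancillary records afpacket can return.
type ancillaryPktType struct{ Type uint8 }
type ancillaryTimestamp struct{ ns int64 }

// findPktType mirrors the fix: scan for the concrete type instead of
// type-asserting on element 0.
func findPktType(ancillary []interface{}) (uint8, bool) {
	for _, data := range ancillary {
		if pt, ok := data.(ancillaryPktType); ok {
			return pt.Type, true
		}
	}
	return 0, false
}

func main() {
	anc := []interface{}{ancillaryTimestamp{ns: 1}, ancillaryPktType{Type: 4}}
	if t, ok := findPktType(anc); ok {
		fmt.Println("packet type:", t) // packet type: 4
	}
}
```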
type StatKeeper struct { - mux sync.Mutex - stats map[Key]*RequestStats - incomplete IncompleteBuffer - maxEntries int - quantizer *URLQuantizer - telemetry *Telemetry - connectionAggregator *utils.ConnectionAggregator - enableStatusCodeAggregation bool + mux sync.Mutex + stats map[Key]*RequestStats + incomplete IncompleteBuffer + maxEntries int + quantizer *URLQuantizer + telemetry *Telemetry + connectionAggregator *utils.ConnectionAggregator // replace rules for HTTP path replaceRules []*config.ReplaceRule @@ -51,16 +50,15 @@ func NewStatkeeper(c *config.Config, telemetry *Telemetry, incompleteBuffer Inco } return &StatKeeper{ - stats: make(map[Key]*RequestStats), - incomplete: incompleteBuffer, - maxEntries: c.MaxHTTPStatsBuffered, - quantizer: quantizer, - replaceRules: c.HTTPReplaceRules, - enableStatusCodeAggregation: c.EnableHTTPStatsByStatusCode, - connectionAggregator: connectionAggregator, - buffer: make([]byte, getPathBufferSize(c)), - telemetry: telemetry, - oversizedLogLimit: log.NewLogLimit(10, time.Minute*10), + stats: make(map[Key]*RequestStats), + incomplete: incompleteBuffer, + maxEntries: c.MaxHTTPStatsBuffered, + quantizer: quantizer, + replaceRules: c.HTTPReplaceRules, + connectionAggregator: connectionAggregator, + buffer: make([]byte, getPathBufferSize(c)), + telemetry: telemetry, + oversizedLogLimit: log.NewLogLimit(10, time.Minute*10), } } @@ -158,7 +156,7 @@ func (h *StatKeeper) add(tx Transaction) { return } h.telemetry.aggregations.Add(1) - stats = NewRequestStats(h.enableStatusCodeAggregation) + stats = NewRequestStats() h.stats[key] = stats } diff --git a/pkg/network/protocols/http/stats.go b/pkg/network/protocols/http/stats.go index ea70a34bae672..43c86354f4bcb 100644 --- a/pkg/network/protocols/http/stats.go +++ b/pkg/network/protocols/http/stats.go @@ -138,27 +138,16 @@ func (r *RequestStat) initSketch() (err error) { // RequestStats stores HTTP request statistics. type RequestStats struct { - aggregateByStatusCode bool - Data map[uint16]*RequestStat + Data map[uint16]*RequestStat } // NewRequestStats creates a new RequestStats object. -func NewRequestStats(aggregateByStatusCode bool) *RequestStats { +func NewRequestStats() *RequestStats { return &RequestStats{ - aggregateByStatusCode: aggregateByStatusCode, - Data: make(map[uint16]*RequestStat), + Data: make(map[uint16]*RequestStat), } } -// NormalizeStatusCode normalizes the status code into a status code family. -func (r *RequestStats) NormalizeStatusCode(status uint16) uint16 { - if r.aggregateByStatusCode { - return status - } - // Normalize into status code family. - return (status / 100) * 100 -} - // isValid checks is the status code is in the range of valid HTTP responses. 
func (r *RequestStats) isValid(status uint16) bool { return status >= 100 && status < 600 @@ -213,8 +202,6 @@ func (r *RequestStats) AddRequest(statusCode uint16, latency float64, staticTags return } - statusCode = r.NormalizeStatusCode(statusCode) - stats, exists := r.Data[statusCode] if !exists { stats = &RequestStat{} diff --git a/pkg/network/protocols/http/stats_test.go b/pkg/network/protocols/http/stats_test.go index b6ecea5c833d5..196406c9cb481 100644 --- a/pkg/network/protocols/http/stats_test.go +++ b/pkg/network/protocols/http/stats_test.go @@ -15,16 +15,7 @@ import ( ) func TestAddRequest(t *testing.T) { - t.Run("status code", func(t *testing.T) { - testAddRequest(t, true) - }) - t.Run("status class", func(t *testing.T) { - testAddRequest(t, false) - }) -} - -func testAddRequest(t *testing.T, aggregateByStatusCode bool) { - stats := NewRequestStats(aggregateByStatusCode) + stats := NewRequestStats() stats.AddRequest(405, 10.0, 1, nil) stats.AddRequest(405, 15.0, 2, nil) stats.AddRequest(405, 20.0, 3, nil) @@ -33,7 +24,7 @@ func testAddRequest(t *testing.T, aggregateByStatusCode bool) { assert.Nil(t, stats.Data[200]) assert.Nil(t, stats.Data[300]) assert.Nil(t, stats.Data[500]) - s := stats.Data[stats.NormalizeStatusCode(405)] + s := stats.Data[405] if assert.NotNil(t, s) { assert.Equal(t, 3, s.Count) @@ -46,23 +37,14 @@ func testAddRequest(t *testing.T, aggregateByStatusCode bool) { } func TestCombineWith(t *testing.T) { - t.Run("status code", func(t *testing.T) { - testCombineWith(t, true) - }) - t.Run("status class", func(t *testing.T) { - testCombineWith(t, false) - }) -} - -func testCombineWith(t *testing.T, aggregateByStatusCode bool) { - stats := NewRequestStats(aggregateByStatusCode) + stats := NewRequestStats() for i := uint16(100); i <= 500; i += 100 { assert.Nil(t, stats.Data[i]) } - stats2 := NewRequestStats(aggregateByStatusCode) - stats3 := NewRequestStats(aggregateByStatusCode) - stats4 := NewRequestStats(aggregateByStatusCode) + stats2 := NewRequestStats() + stats3 := NewRequestStats() + stats4 := NewRequestStats() stats2.AddRequest(405, 10.0, 2, nil) stats3.AddRequest(405, 15.0, 3, nil) stats4.AddRequest(405, 20.0, 4, nil) @@ -75,7 +57,7 @@ func testCombineWith(t *testing.T, aggregateByStatusCode bool) { assert.Nil(t, stats.Data[200]) assert.Nil(t, stats.Data[300]) assert.Nil(t, stats.Data[500]) - s := stats.Data[stats.NormalizeStatusCode(405)] + s := stats.Data[405] if assert.NotNil(t, s) { assert.Equal(t, 3.0, s.Latencies.GetCount()) diff --git a/pkg/network/protocols/http/telemetry.go b/pkg/network/protocols/http/telemetry.go index 736806af2a652..0dddb2591fee9 100644 --- a/pkg/network/protocols/http/telemetry.go +++ b/pkg/network/protocols/http/telemetry.go @@ -10,6 +10,8 @@ package http import ( "fmt" + "github.com/cihub/seelog" + libtelemetry "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -97,5 +99,7 @@ func (t *Telemetry) Count(tx Transaction) { // Log logs the telemetry. 
func (t *Telemetry) Log() { - log.Debugf("%s stats summary: %s", t.protocol, t.metricGroup.Summary()) + if log.ShouldLog(seelog.DebugLvl) { + log.Debugf("%s stats summary: %s", t.protocol, t.metricGroup.Summary()) + } } diff --git a/pkg/network/protocols/http2/telemetry.go b/pkg/network/protocols/http2/telemetry.go index 35f24cfc9878b..e80b5efd1a898 100644 --- a/pkg/network/protocols/http2/telemetry.go +++ b/pkg/network/protocols/http2/telemetry.go @@ -10,6 +10,8 @@ package http2 import ( "strconv" + "github.com/cihub/seelog" + libtelemetry "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -89,7 +91,9 @@ func (t *kernelTelemetry) update(tel *HTTP2Telemetry, isTLS bool) { } func (t *kernelTelemetry) Log() { - log.Debugf("http2 kernel telemetry summary: %s", t.metricGroup.Summary()) + if log.ShouldLog(seelog.DebugLvl) { + log.Debugf("http2 kernel telemetry summary: %s", t.metricGroup.Summary()) + } } // Sub generates a new HTTP2Telemetry object by subtracting the values of this HTTP2Telemetry object from the other diff --git a/pkg/network/protocols/kafka/api_version_counter.go b/pkg/network/protocols/kafka/api_version_counter.go index dbc80f1a7efca..69f640396b4bd 100644 --- a/pkg/network/protocols/kafka/api_version_counter.go +++ b/pkg/network/protocols/kafka/api_version_counter.go @@ -15,7 +15,7 @@ import ( const ( minSupportedAPIVersion = 1 - maxSupportedAPIVersion = 11 + maxSupportedAPIVersion = max(MaxSupportedProduceRequestApiVersion, MaxSupportedFetchRequestApiVersion) ) // apiVersionCounter is a Kafka API version aware counter, it has a counter for each supported Kafka API version. @@ -40,8 +40,8 @@ func newAPIVersionCounter(metricGroup *libtelemetry.MetricGroup, metricName stri // Add increments the API version counter based on the specified request api version func (c *apiVersionCounter) Add(tx *KafkaTransaction) { if tx.Request_api_version < minSupportedAPIVersion || tx.Request_api_version > maxSupportedAPIVersion { - c.hitsUnsupportedVersion.Add(1) + c.hitsUnsupportedVersion.Add(int64(tx.Records_count)) return } - c.hitsVersions[tx.Request_api_version-1].Add(1) + c.hitsVersions[tx.Request_api_version-1].Add(int64(tx.Records_count)) } diff --git a/pkg/network/protocols/kafka/statkeeper.go b/pkg/network/protocols/kafka/statkeeper.go index fe2653d84726c..61565fec23593 100644 --- a/pkg/network/protocols/kafka/statkeeper.go +++ b/pkg/network/protocols/kafka/statkeeper.go @@ -42,7 +42,7 @@ func (statKeeper *StatKeeper) Process(tx *EbpfTx) { latency := tx.RequestLatency() // Produce requests with acks = 0 do not receive a response, and as a result, have no latency if tx.APIKey() == FetchAPIKey && latency <= 0 { - statKeeper.telemetry.invalidLatency.Add(1) + statKeeper.telemetry.invalidLatency.Add(int64(tx.RecordsCount())) return } @@ -60,7 +60,7 @@ func (statKeeper *StatKeeper) Process(tx *EbpfTx) { requestStats, ok := statKeeper.stats[key] if !ok { if len(statKeeper.stats) >= statKeeper.maxEntries { - statKeeper.telemetry.dropped.Add(1) + statKeeper.telemetry.dropped.Add(int64(tx.RecordsCount())) return } requestStats = NewRequestStats() diff --git a/pkg/network/protocols/kafka/telemetry.go b/pkg/network/protocols/kafka/telemetry.go index aee8780b9a47f..9905756dd880e 100644 --- a/pkg/network/protocols/kafka/telemetry.go +++ b/pkg/network/protocols/kafka/telemetry.go @@ -8,6 +8,8 @@ package kafka import ( + "github.com/cihub/seelog" + libtelemetry "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" 
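The `log.ShouldLog(seelog.DebugLvl)` guard introduced here and in the following telemetry hunks exists because arguments to `Debugf` are evaluated before the call: `metricGroup.Summary()` builds its string even when debug logging is off. A standalone sketch of the cost being skipped, with hypothetical `debugf`/`expensiveSummary` helpers standing in for the agent's log package:

```go
package main

import "fmt"

var debugEnabled = false // stand-in for log.ShouldLog(seelog.DebugLvl)

// expensiveSummary stands in for metricGroup.Summary(): its cost is paid
// at argument-evaluation time, whether or not anything is printed.
func expensiveSummary() string {
	fmt.Println("building summary...")
	return "42 hits"
}

func debugf(format string, args ...interface{}) {
	if debugEnabled {
		fmt.Printf(format+"\n", args...)
	}
}

func main() {
	// Unguarded: expensiveSummary() still runs; only the print is skipped.
	debugf("summary: %s", expensiveSummary())

	// Guarded: the whole call, including the summary build, is skipped.
	if debugEnabled {
		debugf("summary: %s", expensiveSummary())
	}
}
```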
"github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -49,5 +51,7 @@ func (t *Telemetry) Count(tx *KafkaTransaction) { // Log logs the kafka stats summary func (t *Telemetry) Log() { - log.Debugf("kafka stats summary: %s", t.metricGroup.Summary()) + if log.ShouldLog(seelog.DebugLvl) { + log.Debugf("kafka stats summary: %s", t.metricGroup.Summary()) + } } diff --git a/pkg/network/protocols/kafka/telemetry_test.go b/pkg/network/protocols/kafka/telemetry_test.go index a890f3fd1a9e3..c97fa3998aa84 100644 --- a/pkg/network/protocols/kafka/telemetry_test.go +++ b/pkg/network/protocols/kafka/telemetry_test.go @@ -26,10 +26,12 @@ func TestTelemetry_Count(t *testing.T) { tx1: &KafkaTransaction{ Request_api_key: 0, Request_api_version: 4, + Records_count: 1, }, tx2: &KafkaTransaction{ Request_api_key: 1, Request_api_version: 7, + Records_count: 4, }, }, { @@ -37,10 +39,12 @@ func TestTelemetry_Count(t *testing.T) { tx1: &KafkaTransaction{ Request_api_key: 0, Request_api_version: 0, + Records_count: 10, }, tx2: &KafkaTransaction{ Request_api_key: 1, Request_api_version: 7, + Records_count: 10, }, }, { @@ -48,10 +52,12 @@ func TestTelemetry_Count(t *testing.T) { tx1: &KafkaTransaction{ Request_api_key: 0, Request_api_version: 0, + Records_count: 10, }, tx2: &KafkaTransaction{ Request_api_key: 1, Request_api_version: 0, + Records_count: 10, }, }, { @@ -59,10 +65,12 @@ func TestTelemetry_Count(t *testing.T) { tx1: &KafkaTransaction{ Request_api_key: 3, Request_api_version: 5, + Records_count: 10, }, tx2: &KafkaTransaction{ Request_api_key: 1, Request_api_version: 8, + Records_count: 10, }, }, } @@ -81,15 +89,15 @@ func TestTelemetry_Count(t *testing.T) { func verifyHitsCount(t *testing.T, telemetry *Telemetry, tx *KafkaTransaction) { if tx.Request_api_key == 0 { if tx.Request_api_version < minSupportedAPIVersion || tx.Request_api_version > maxSupportedAPIVersion { - assert.Equal(t, telemetry.produceHits.hitsUnsupportedVersion.Get(), int64(1), "hitsUnsupportedVersion count is incorrect") + assert.Equal(t, telemetry.produceHits.hitsUnsupportedVersion.Get(), int64(tx.Records_count), "hitsUnsupportedVersion count is incorrect") return } - assert.Equal(t, telemetry.produceHits.hitsVersions[tx.Request_api_version-1].Get(), int64(1), "produceHits count is incorrect") + assert.Equal(t, telemetry.produceHits.hitsVersions[tx.Request_api_version-1].Get(), int64(tx.Records_count), "produceHits count is incorrect") } else if tx.Request_api_key == 1 { if tx.Request_api_version < minSupportedAPIVersion || tx.Request_api_version > maxSupportedAPIVersion { - assert.Equal(t, telemetry.fetchHits.hitsUnsupportedVersion.Get(), int64(1), "hitsUnsupportedVersion count is incorrect") + assert.Equal(t, telemetry.fetchHits.hitsUnsupportedVersion.Get(), int64(tx.Records_count), "hitsUnsupportedVersion count is incorrect") return } - assert.Equal(t, telemetry.fetchHits.hitsVersions[tx.Request_api_version-1].Get(), int64(1), "fetchHits count is incorrect") + assert.Equal(t, telemetry.fetchHits.hitsVersions[tx.Request_api_version-1].Get(), int64(tx.Records_count), "fetchHits count is incorrect") } } diff --git a/pkg/network/protocols/kafka/types.go b/pkg/network/protocols/kafka/types.go index e071dd1be5dc3..edc389df7e634 100644 --- a/pkg/network/protocols/kafka/types.go +++ b/pkg/network/protocols/kafka/types.go @@ -14,8 +14,10 @@ package kafka import "C" const ( - TopicNameBuckets = C.KAFKA_TELEMETRY_TOPIC_NAME_NUM_OF_BUCKETS - TopicNameMaxSize = C.TOPIC_NAME_MAX_STRING_SIZE + TopicNameBuckets = 
C.KAFKA_TELEMETRY_TOPIC_NAME_NUM_OF_BUCKETS + TopicNameMaxSize = C.TOPIC_NAME_MAX_STRING_SIZE + MaxSupportedProduceRequestApiVersion = C.KAFKA_MAX_SUPPORTED_PRODUCE_REQUEST_API_VERSION + MaxSupportedFetchRequestApiVersion = C.KAFKA_MAX_SUPPORTED_FETCH_REQUEST_API_VERSION ) type ConnTuple C.conn_tuple_t diff --git a/pkg/network/protocols/kafka/types_linux.go b/pkg/network/protocols/kafka/types_linux.go index ca225a50ec3a5..49aec7520aed3 100644 --- a/pkg/network/protocols/kafka/types_linux.go +++ b/pkg/network/protocols/kafka/types_linux.go @@ -4,8 +4,10 @@ package kafka const ( - TopicNameBuckets = 0xa - TopicNameMaxSize = 0x50 + TopicNameBuckets = 0xa + TopicNameMaxSize = 0x50 + MaxSupportedProduceRequestApiVersion = 0xa + MaxSupportedFetchRequestApiVersion = 0xc ) type ConnTuple struct { diff --git a/pkg/network/protocols/postgres/telemetry.go b/pkg/network/protocols/postgres/telemetry.go index 349c8cee77c05..cc17b902c24ea 100644 --- a/pkg/network/protocols/postgres/telemetry.go +++ b/pkg/network/protocols/postgres/telemetry.go @@ -10,6 +10,8 @@ package postgres import ( "fmt" + "github.com/cihub/seelog" + "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/protocols/postgres/ebpf" libtelemetry "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" @@ -172,5 +174,7 @@ func (t *Telemetry) Count(tx *ebpf.EbpfEvent, eventWrapper *EventWrapper) { // Log logs the postgres stats summary func (t *Telemetry) Log() { - log.Debugf("postgres stats summary: %s", t.metricGroup.Summary()) + if log.ShouldLog(seelog.DebugLvl) { + log.Debugf("postgres stats summary: %s", t.metricGroup.Summary()) + } } diff --git a/pkg/network/protocols/telemetry/metric.go b/pkg/network/protocols/telemetry/metric.go index b44b0de0690ef..46aa226351278 100644 --- a/pkg/network/protocols/telemetry/metric.go +++ b/pkg/network/protocols/telemetry/metric.go @@ -28,14 +28,12 @@ func NewCounter(name string, tagsAndOptions ...string) *Counter { // Add value atomically func (c *Counter) Add(v int64) { - if v < 0 { - // Counters are always monotonic so we don't allow negative numbers. We + if v > 0 { + // Counters are always monotonic so we don't allow non-positive numbers. We // could enforce this by using an unsigned type, but that would make the // API a little bit more cumbersome to use. 
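Two details worth calling out in the Kafka hunks above: `maxSupportedAPIVersion` is now derived with Go's built-in `max` (Go 1.21+) from the generated per-request-type constants, so it tracks the eBPF headers automatically; and the counters are incremented by `Records_count` rather than 1, weighting telemetry by record volume instead of transaction count. A simplified sketch of the weighting, with names condensed from the diff:

```go
package example

const (
	minSupportedAPIVersion = 1
	maxSupportedAPIVersion = max(10, 12) // mirrors max(Produce, Fetch) in the diff
)

type counter struct{ n int64 }

func (c *counter) Add(v int64) { c.n += v }

type kafkaTransaction struct {
	requestAPIVersion int16
	recordsCount      uint32
}

// countHit adds one unit per record carried by the transaction, so a fetch
// of 10 records weighs ten times a single-record fetch.
func countHit(hits []counter, unsupported *counter, tx kafkaTransaction) {
	if tx.requestAPIVersion < minSupportedAPIVersion || int(tx.requestAPIVersion) > maxSupportedAPIVersion {
		unsupported.Add(int64(tx.recordsCount))
		return
	}
	hits[tx.requestAPIVersion-1].Add(int64(tx.recordsCount))
}
```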
- return + c.value.Add(v) } - - c.value.Add(v) } func (c *Counter) base() *metricBase { diff --git a/pkg/network/protocols/telemetry/metric_test.go b/pkg/network/protocols/telemetry/metric_test.go index 84c28b0416a41..484656e17bf6e 100644 --- a/pkg/network/protocols/telemetry/metric_test.go +++ b/pkg/network/protocols/telemetry/metric_test.go @@ -11,6 +11,24 @@ import ( "github.com/stretchr/testify/assert" ) +func BenchmarkAddPositive(b *testing.B) { + m := NewCounter("foo") + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Add(1) + } +} + +func BenchmarkAddZero(b *testing.B) { + m := NewCounter("foo") + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Add(0) + } +} + func TestNewMetric(t *testing.T) { assert := assert.New(t) diff --git a/pkg/network/state_test.go b/pkg/network/state_test.go index d4015c6a972ca..a1134d192ce34 100644 --- a/pkg/network/state_test.go +++ b/pkg/network/state_test.go @@ -1673,15 +1673,6 @@ func TestDNSStatsWithMultipleClients(t *testing.T) { } func TestHTTPStats(t *testing.T) { - t.Run("status code", func(t *testing.T) { - testHTTPStats(t, true) - }) - t.Run("status class", func(t *testing.T) { - testHTTPStats(t, false) - }) -} - -func testHTTPStats(t *testing.T, aggregateByStatusCode bool) { c := ConnectionStats{ Source: util.AddressFromString("1.1.1.1"), Dest: util.AddressFromString("0.0.0.0"), @@ -1692,7 +1683,7 @@ func testHTTPStats(t *testing.T, aggregateByStatusCode bool) { key := http.NewKey(c.Source, c.Dest, c.SPort, c.DPort, []byte("/testpath"), true, http.MethodGet) httpStats := make(map[http.Key]*http.RequestStats) - httpStats[key] = http.NewRequestStats(aggregateByStatusCode) + httpStats[key] = http.NewRequestStats() usmStats := make(map[protocols.ProtocolType]interface{}) usmStats[protocols.HTTP] = httpStats @@ -1710,15 +1701,6 @@ func testHTTPStats(t *testing.T, aggregateByStatusCode bool) { } func TestHTTP2Stats(t *testing.T) { - t.Run("status code", func(t *testing.T) { - testHTTP2Stats(t, true) - }) - t.Run("status class", func(t *testing.T) { - testHTTP2Stats(t, false) - }) -} - -func testHTTP2Stats(t *testing.T, aggregateByStatusCode bool) { c := ConnectionStats{ Source: util.AddressFromString("1.1.1.1"), Dest: util.AddressFromString("0.0.0.0"), @@ -1730,7 +1712,7 @@ func testHTTP2Stats(t *testing.T, aggregateByStatusCode bool) { key := http.NewKey(c.Source, c.Dest, c.SPort, c.DPort, []byte(path), true, http.MethodGet) http2Stats := make(map[http.Key]*http.RequestStats) - http2Stats[key] = http.NewRequestStats(aggregateByStatusCode) + http2Stats[key] = http.NewRequestStats() usmStats := make(map[protocols.ProtocolType]interface{}) usmStats[protocols.HTTP2] = http2Stats @@ -1751,15 +1733,6 @@ func testHTTP2Stats(t *testing.T, aggregateByStatusCode bool) { } func TestHTTPStatsWithMultipleClients(t *testing.T) { - t.Run("status code", func(t *testing.T) { - testHTTPStatsWithMultipleClients(t, true) - }) - t.Run("status class", func(t *testing.T) { - testHTTPStatsWithMultipleClients(t, false) - }) -} - -func testHTTPStatsWithMultipleClients(t *testing.T, aggregateByStatusCode bool) { c := ConnectionStats{ Source: util.AddressFromString("1.1.1.1"), Dest: util.AddressFromString("0.0.0.0"), @@ -1770,7 +1743,7 @@ func testHTTPStatsWithMultipleClients(t *testing.T, aggregateByStatusCode bool) getStats := func(path string) map[protocols.ProtocolType]interface{} { httpStats := make(map[http.Key]*http.RequestStats) key := http.NewKey(c.Source, c.Dest, c.SPort, c.DPort, []byte(path), true, http.MethodGet) - 
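The `pkg/network/protocols/telemetry/metric.go` rewrite above inverts the guard: instead of rejecting negatives and then unconditionally adding, `Counter.Add` now touches the atomic only for strictly positive deltas, making `Add(0)` a no-op; that is exactly the case the new `BenchmarkAddZero` measures. The resulting shape, assuming a `sync/atomic`-style value (the agent's concrete atomic type may differ):

```go
package telemetry

import "sync/atomic"

// Counter is monotonic: zero and negative deltas are ignored, and Add(0)
// skips the atomic operation entirely.
type Counter struct {
	value atomic.Int64
}

func (c *Counter) Add(v int64) {
	if v > 0 {
		c.value.Add(v)
	}
}
```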
httpStats[key] = http.NewRequestStats(aggregateByStatusCode) + httpStats[key] = http.NewRequestStats() usmStats := make(map[protocols.ProtocolType]interface{}) usmStats[protocols.HTTP] = httpStats @@ -1823,15 +1796,6 @@ func testHTTPStatsWithMultipleClients(t *testing.T, aggregateByStatusCode bool) } func TestHTTP2StatsWithMultipleClients(t *testing.T) { - t.Run("status code", func(t *testing.T) { - testHTTP2StatsWithMultipleClients(t, true) - }) - t.Run("status class", func(t *testing.T) { - testHTTP2StatsWithMultipleClients(t, false) - }) -} - -func testHTTP2StatsWithMultipleClients(t *testing.T, aggregateByStatusCode bool) { c := ConnectionStats{ Source: util.AddressFromString("1.1.1.1"), Dest: util.AddressFromString("0.0.0.0"), @@ -1842,7 +1806,7 @@ func testHTTP2StatsWithMultipleClients(t *testing.T, aggregateByStatusCode bool) getStats := func(path string) map[protocols.ProtocolType]interface{} { http2Stats := make(map[http.Key]*http.RequestStats) key := http.NewKey(c.Source, c.Dest, c.SPort, c.DPort, []byte(path), true, http.MethodGet) - http2Stats[key] = http.NewRequestStats(aggregateByStatusCode) + http2Stats[key] = http.NewRequestStats() usmStats := make(map[protocols.ProtocolType]interface{}) usmStats[protocols.HTTP2] = http2Stats diff --git a/pkg/network/usm/monitor_test.go b/pkg/network/usm/monitor_test.go index c9004db6adecc..bbbe03202b32e 100644 --- a/pkg/network/usm/monitor_test.go +++ b/pkg/network/usm/monitor_test.go @@ -108,54 +108,35 @@ func TestHTTP(t *testing.T) { func (s *HTTPTestSuite) TestHTTPStats() { t := s.T() - testCases := []struct { - name string - aggregateByStatusCode bool - }{ - { - name: "status code", - aggregateByStatusCode: true, - }, - { - name: "status class", - aggregateByStatusCode: false, - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - // Start an HTTP server on localhost:8080 - serverAddr := "127.0.0.1:8080" - srvDoneFn := testutil.HTTPServer(t, serverAddr, testutil.Options{ - EnableKeepAlive: true, - }) - t.Cleanup(srvDoneFn) + // Start an HTTP server on localhost:8080 + serverAddr := "127.0.0.1:8080" + srvDoneFn := testutil.HTTPServer(t, serverAddr, testutil.Options{ + EnableKeepAlive: true, + }) + t.Cleanup(srvDoneFn) - cfg := config.New() - cfg.EnableHTTPStatsByStatusCode = tt.aggregateByStatusCode - monitor := newHTTPMonitorWithCfg(t, cfg) + monitor := newHTTPMonitorWithCfg(t, config.New()) - resp, err := nethttp.Get(fmt.Sprintf("http://%s/%d/test", serverAddr, nethttp.StatusNoContent)) - require.NoError(t, err) - _ = resp.Body.Close() - srvDoneFn() + resp, err := nethttp.Get(fmt.Sprintf("http://%s/%d/test", serverAddr, nethttp.StatusNoContent)) + require.NoError(t, err) + _ = resp.Body.Close() + srvDoneFn() - // Iterate through active connections until we find connection created above - require.Eventuallyf(t, func() bool { - stats := getHTTPLikeProtocolStats(monitor, protocols.HTTP) + // Iterate through active connections until we find connection created above + require.Eventuallyf(t, func() bool { + stats := getHTTPLikeProtocolStats(monitor, protocols.HTTP) - for key, reqStats := range stats { - if key.Method == http.MethodGet && strings.HasSuffix(key.Path.Content.Get(), "/test") && (key.SrcPort == 8080 || key.DstPort == 8080) { - currentStats := reqStats.Data[reqStats.NormalizeStatusCode(204)] - if currentStats != nil && currentStats.Count == 1 { - return true - } - } + for key, reqStats := range stats { + if key.Method == http.MethodGet && strings.HasSuffix(key.Path.Content.Get(), "/test") && 
(key.SrcPort == 8080 || key.DstPort == 8080) { + currentStats := reqStats.Data[204] + if currentStats != nil && currentStats.Count == 1 { + return true } + } + } - return false - }, 3*time.Second, 100*time.Millisecond, "couldn't find http connection matching: %s", serverAddr) - }) - } + return false + }, 3*time.Second, 100*time.Millisecond, "couldn't find http connection matching: %s", serverAddr) } // TestHTTPMonitorLoadWithIncompleteBuffers sends thousands of requests without getting responses for them, in parallel diff --git a/pkg/network/usm/monitor_tls_test.go b/pkg/network/usm/monitor_tls_test.go index d4c012a0747f5..7288ab33b0e87 100644 --- a/pkg/network/usm/monitor_tls_test.go +++ b/pkg/network/usm/monitor_tls_test.go @@ -746,7 +746,6 @@ func testHTTPSGoTLSCaptureNewProcessContainer(t *testing.T, cfg *config.Config) // Setup cfg.EnableGoTLSSupport = true cfg.EnableHTTPMonitoring = true - cfg.EnableHTTPStatsByStatusCode = true usmMonitor := setupUSMTLSMonitor(t, cfg) @@ -781,7 +780,6 @@ func testHTTPSGoTLSCaptureAlreadyRunningContainer(t *testing.T, cfg *config.Conf // Setup cfg.EnableGoTLSSupport = true cfg.EnableHTTPMonitoring = true - cfg.EnableHTTPStatsByStatusCode = true usmMonitor := setupUSMTLSMonitor(t, cfg) diff --git a/pkg/network/usm/utils/file_registry.go b/pkg/network/usm/utils/file_registry.go index 2b212074a7413..e4b4b376a1d44 100644 --- a/pkg/network/usm/utils/file_registry.go +++ b/pkg/network/usm/utils/file_registry.go @@ -13,8 +13,8 @@ import ( "os" "sync" + "github.com/cihub/seelog" "github.com/hashicorp/golang-lru/v2/simplelru" - "go.uber.org/atomic" "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" @@ -164,14 +164,25 @@ func (r *FileRegistry) Register(namespacedPath string, pid uint32, activationCB, } if err := activationCB(path); err != nil { + // We need to call `deactivationCB` here as some uprobes could be + // already attached. This could be the case even when the checks below + // indicate that the process is gone, since the process could disappear + // between two uprobe registrations. + _ = deactivationCB(FilePath{ID: pathID}) + // short living process would be hard to catch and will failed when we try to open the library // so let's failed silently if errors.Is(err, os.ErrNotExist) { return err } + // Adding the path to the block list is irreversible, so double-check + // that we really didn't fail because the process is gone (the path is + // /proc/PID/root/...), since we can't be certain that ErrNotExist is + // always correctly propagated from the activation callback. 
+ if _, statErr := os.Stat(path.HostPath); errors.Is(statErr, os.ErrNotExist) { + return errors.Join(statErr, err) + } - // we are calling `deactivationCB` here as some uprobes could be already attached - err = deactivationCB(FilePath{ID: pathID}) if r.blocklistByID != nil { // add `pathID` to blocklist so we don't attempt to re-register files // that are problematic for some reason @@ -244,7 +255,9 @@ func (r *FileRegistry) GetRegisteredProcesses() map[uint32]struct{} { // Log state of `FileRegistry` func (r *FileRegistry) Log() { - log.Debugf("file_registry summary: program=%s %s", r.telemetry.programName, r.telemetry.metricGroup.Summary()) + if log.ShouldLog(seelog.DebugLvl) { + log.Debugf("file_registry summary: program=%s %s", r.telemetry.programName, r.telemetry.metricGroup.Summary()) + } } // Clear removes all registrations calling their deactivation callbacks diff --git a/pkg/network/usm/utils/file_registry_test.go b/pkg/network/usm/utils/file_registry_test.go index 6c8e6cac22a07..51ee17f600345 100644 --- a/pkg/network/usm/utils/file_registry_test.go +++ b/pkg/network/usm/utils/file_registry_test.go @@ -212,19 +212,26 @@ func TestFailedRegistration(t *testing.T) { registerRecorder.ReturnError = fmt.Errorf("failed registration") registerCallback := registerRecorder.Callback() + unregisterRecorder := new(CallbackRecorder) + unregisterCallback := unregisterRecorder.Callback() + r := newFileRegistry() path, pathID := createTempTestFile(t, "foobar") cmd, err := testutil.OpenFromAnotherProcess(t, path) require.NoError(t, err) pid := uint32(cmd.Process.Pid) - require.NoError(t, r.Register(path, pid, registerCallback, IgnoreCB, IgnoreCB)) + err = r.Register(path, pid, registerCallback, unregisterCallback, IgnoreCB) + require.ErrorIs(t, err, registerRecorder.ReturnError) // First let's assert that the callback was executed once, but there are no // registered processes because the registration should have failed assert.Equal(t, 1, registerRecorder.CallsForPathID(pathID)) assert.Empty(t, r.GetRegisteredProcesses()) + // The unregister callback should have been called to clean up the failed registration. + assert.Equal(t, 1, unregisterRecorder.CallsForPathID(pathID)) + // Now let's try to register the same process again require.Equal(t, errPathIsBlocked, r.Register(path, pid, registerCallback, IgnoreCB, IgnoreCB)) @@ -233,6 +240,53 @@ func TestFailedRegistration(t *testing.T) { assert.Equal(t, 1, registerRecorder.CallsForPathID(pathID)) } +func TestShortLivedProcess(t *testing.T) { + // Create a callback recorder that returns an error on purpose + registerRecorder := new(CallbackRecorder) + registerRecorder.ReturnError = fmt.Errorf("failed registration") + recorderCallback := registerRecorder.Callback() + + unregisterRecorder := new(CallbackRecorder) + unregisterCallback := unregisterRecorder.Callback() + + r := newFileRegistry() + path, pathID := createTempTestFile(t, "foobar") + cmd, err := testutil.OpenFromAnotherProcess(t, path) + require.NoError(t, err) + pid := uint32(cmd.Process.Pid) + + registerCallback := func(fp FilePath) error { + // Simulate a short-lived process by killing it during the registration. 
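To summarize the `file_registry.go` failure path above: the deactivation callback now runs on every activation failure, since uprobes may be partially attached, and before blocklisting the registry stats the `/proc/PID/root/...` host path so that a process which died mid-registration yields a retriable error rather than a permanent block. A condensed sketch of that control flow with simplified names and signatures (`errors.Join` requires Go 1.20+):

```go
package example

import (
	"errors"
	"os"
)

type pathIdentifier struct{ key uint64 }
type callback func(hostPath string) error

type fileRegistry struct {
	blocklist map[pathIdentifier]struct{}
}

// register mirrors the new failure handling: always undo partial
// attachment, and only blocklist when the process still exists.
func (r *fileRegistry) register(hostPath string, pathID pathIdentifier, activate, deactivate callback) error {
	err := activate(hostPath)
	if err == nil {
		return nil
	}
	// Some uprobes may already be attached; always detach them.
	_ = deactivate(hostPath)

	if errors.Is(err, os.ErrNotExist) {
		return err // short-lived process: fail without blocklisting
	}
	// Blocklisting is irreversible, so double-check that the failure was
	// not simply the process disappearing under us.
	if _, statErr := os.Stat(hostPath); errors.Is(statErr, os.ErrNotExist) {
		return errors.Join(statErr, err)
	}
	r.blocklist[pathID] = struct{}{}
	return err
}
```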
+ cmd.Process.Kill() + cmd.Process.Wait() + return recorderCallback(fp) + } + + err = r.Register(path, pid, registerCallback, unregisterCallback, IgnoreCB) + require.ErrorIs(t, err, registerRecorder.ReturnError) + + // First let's assert that the callback was executed once, but there are no + // registered processes because the registration should have failed + assert.Equal(t, 1, registerRecorder.CallsForPathID(pathID)) + assert.Empty(t, r.GetRegisteredProcesses()) + + // The unregister callback should have been called to clean up the failed registration. + assert.Equal(t, 1, unregisterRecorder.CallsForPathID(pathID)) + + cmd, err = testutil.OpenFromAnotherProcess(t, path) + require.NoError(t, err) + pid = uint32(cmd.Process.Pid) + + registerRecorder.ReturnError = nil + + // Now let's try to register the same path again + require.Nil(t, r.Register(path, pid, recorderCallback, IgnoreCB, IgnoreCB)) + + // Assert that the path is successfully registered since it shouldn't have been blocked. + assert.Equal(t, 2, registerRecorder.CallsForPathID(pathID)) + assert.Contains(t, r.GetRegisteredProcesses(), pid) +} + func TestFilePathInCallbackArgument(t *testing.T) { var capturedPath string callback := func(f FilePath) error { diff --git a/pkg/networkpath/traceroute/tcp/utils.go b/pkg/networkpath/traceroute/tcp/utils.go index 7eb8c5cf45222..be2ed9b6812c7 100644 --- a/pkg/networkpath/traceroute/tcp/utils.go +++ b/pkg/networkpath/traceroute/tcp/utils.go @@ -57,7 +57,7 @@ type ( tcpResponse struct { SrcIP net.IP DstIP net.IP - TCPResponse *layers.TCP + TCPResponse layers.TCP } rawConnWrapper interface { @@ -201,6 +201,7 @@ func listenPackets(icmpConn rawConnWrapper, tcpConn rawConnWrapper, timeout time // timeout or if the listener is canceled, it should return a canceledError func handlePackets(ctx context.Context, conn rawConnWrapper, listener string, localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32) (net.IP, uint16, layers.ICMPv4TypeCode, time.Time, error) { buf := make([]byte, 1024) + tp := newTCPParser() for { select { case <-ctx.Done(): @@ -230,16 +231,16 @@ func handlePackets(ctx context.Context, conn rawConnWrapper, listener string, lo if listener == "icmp" { icmpResponse, err := parseICMP(header, packet) if err != nil { - log.Tracef("failed to parse ICMP packet: %s", err.Error()) + log.Tracef("failed to parse ICMP packet: %s", err) continue } if icmpMatch(localIP, localPort, remoteIP, remotePort, seqNum, icmpResponse) { return icmpResponse.SrcIP, 0, icmpResponse.TypeCode, received, nil } } else if listener == "tcp" { - tcpResp, err := parseTCP(header, packet) + tcpResp, err := tp.parseTCP(header, packet) if err != nil { - log.Tracef("failed to parse TCP packet: %s", err.Error()) + log.Tracef("failed to parse TCP packet: %s", err) continue } if tcpMatch(localIP, localPort, remoteIP, remotePort, seqNum, tcpResp) { @@ -309,25 +310,37 @@ func parseICMP(header *ipv4.Header, payload []byte) (*icmpResponse, error) { return &icmpResponse, nil } -func parseTCP(header *ipv4.Header, payload []byte) (*tcpResponse, error) { - tcpResponse := tcpResponse{} +type tcpParser struct { + layer layers.TCP + decoded []gopacket.LayerType + decodingLayerParser *gopacket.DecodingLayerParser +} + +func newTCPParser() *tcpParser { + tcpParser := &tcpParser{} + tcpParser.decodingLayerParser = gopacket.NewDecodingLayerParser(layers.LayerTypeTCP, &tcpParser.layer) + return tcpParser +} +func (tp *tcpParser) parseTCP(header *ipv4.Header, payload []byte) (*tcpResponse, error) { if 
header.Protocol != IPProtoTCP || header.Version != 4 || header.Src == nil || header.Dst == nil { return nil, fmt.Errorf("invalid IP header for TCP packet: %+v", header) } - tcpResponse.SrcIP = header.Src - tcpResponse.DstIP = header.Dst - var tcpLayer layers.TCP - decoded := []gopacket.LayerType{} - tcpParser := gopacket.NewDecodingLayerParser(layers.LayerTypeTCP, &tcpLayer) - if err := tcpParser.DecodeLayers(payload, &decoded); err != nil { + if err := tp.decodingLayerParser.DecodeLayers(payload, &tp.decoded); err != nil { return nil, fmt.Errorf("failed to decode TCP packet: %w", err) } - tcpResponse.TCPResponse = &tcpLayer - return &tcpResponse, nil + resp := &tcpResponse{ + SrcIP: header.Src, + DstIP: header.Dst, + TCPResponse: tp.layer, + } + // make sure the TCP layer is cleared between runs + tp.layer = layers.TCP{} + + return resp, nil } func icmpMatch(localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32, response *icmpResponse) bool { diff --git a/pkg/networkpath/traceroute/tcp/utils_test.go b/pkg/networkpath/traceroute/tcp/utils_test.go index a7ac53e57cc68..b38d7fd5bc492 100644 --- a/pkg/networkpath/traceroute/tcp/utils_test.go +++ b/pkg/networkpath/traceroute/tcp/utils_test.go @@ -305,15 +305,16 @@ func Test_parseTCP(t *testing.T) { expected: &tcpResponse{ SrcIP: srcIP, DstIP: dstIP, - TCPResponse: encodedTCPLayer, + TCPResponse: *encodedTCPLayer, }, errMsg: "", }, } + tp := newTCPParser() for _, test := range tt { t.Run(test.description, func(t *testing.T) { - actual, err := parseTCP(test.inHeader, test.inPayload) + actual, err := tp.parseTCP(test.inHeader, test.inPayload) if test.errMsg != "" { require.Error(t, err) assert.Contains(t, err.Error(), test.errMsg) @@ -331,6 +332,24 @@ func Test_parseTCP(t *testing.T) { } } +func BenchmarkParseTCP(b *testing.B) { + ipv4Header := createMockIPv4Header(srcIP, dstIP, 6) // 6 is TCP + tcpLayer := createMockTCPLayer(12345, 443, 28394, 12737, true, true, true) + + // full packet + _, fullTCPPacket := createMockTCPPacket(ipv4Header, tcpLayer) + + tp := newTCPParser() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := tp.parseTCP(ipv4Header, fullTCPPacket) + if err != nil { + b.Fatal(err) + } + } +} + func (m *mockRawConn) SetReadDeadline(t time.Time) error { if m.setReadDeadlineErr != nil { return m.setReadDeadlineErr diff --git a/pkg/security/ebpf/c/include/hooks/network/net_device.h b/pkg/security/ebpf/c/include/hooks/network/net_device.h index be38d067b0886..0f3cfcee66ee5 100644 --- a/pkg/security/ebpf/c/include/hooks/network/net_device.h +++ b/pkg/security/ebpf/c/include/hooks/network/net_device.h @@ -109,30 +109,6 @@ int hook___dev_get_by_index(ctx_t *ctx) { return 0; }; -HOOK_ENTRY("__dev_get_by_name") -int hook___dev_get_by_name(ctx_t *ctx) { - u64 id = bpf_get_current_pid_tgid(); - struct net *net = (struct net *)CTX_PARM1(ctx); - - struct device_name_t name = { - .netns = get_netns_from_net(net), - }; - bpf_probe_read_str(&name.name[0], sizeof(name.name), (void *)CTX_PARM2(ctx)); - - struct device_ifindex_t *ifindex = bpf_map_lookup_elem(&veth_device_name_to_ifindex, &name); - if (ifindex == NULL) { - return 0; - } - - struct device_ifindex_t entry = { - .netns = name.netns, - .ifindex = ifindex->ifindex, - }; - - bpf_map_update_elem(&netdevice_lookup_cache, &id, &entry, BPF_ANY); - return 0; -}; - HOOK_EXIT("register_netdevice") int rethook_register_netdevice(ctx_t *ctx) { u64 id = bpf_get_current_pid_tgid(); diff --git a/pkg/security/ebpf/c/include/maps.h 
b/pkg/security/ebpf/c/include/maps.h index 2d567c8e2c0c5..02974e0286dd5 100644 --- a/pkg/security/ebpf/c/include/maps.h +++ b/pkg/security/ebpf/c/include/maps.h @@ -59,7 +59,6 @@ BPF_LRU_MAP(conntrack, struct namespaced_flow_t, struct namespaced_flow_t, 4096) BPF_LRU_MAP(io_uring_ctx_pid, void *, u64, 2048) BPF_LRU_MAP(veth_state_machine, u64, struct veth_state_t, 1024) BPF_LRU_MAP(veth_devices, struct device_ifindex_t, struct device_t, 1024) -BPF_LRU_MAP(veth_device_name_to_ifindex, struct device_name_t, struct device_ifindex_t, 1024) BPF_LRU_MAP(exec_file_cache, u64, struct file_t, 4096) BPF_LRU_MAP(syscall_monitor, struct syscall_monitor_key_t, struct syscall_monitor_entry_t, 2048) BPF_LRU_MAP(syscall_table, struct syscall_table_key_t, u8, 50) diff --git a/pkg/security/ebpf/probes/event_types.go b/pkg/security/ebpf/probes/event_types.go index 2c3d19d637464..d882f048fb299 100644 --- a/pkg/security/ebpf/probes/event_types.go +++ b/pkg/security/ebpf/probes/event_types.go @@ -12,6 +12,7 @@ import ( manager "github.com/DataDog/ebpf-manager" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/utils" ) @@ -59,7 +60,6 @@ func NetworkSelectors() []manager.ProbesSelector { kprobeOrFentry("dev_new_index"), kretprobeOrFexit("dev_new_index"), kprobeOrFentry("__dev_get_by_index"), - kprobeOrFentry("__dev_get_by_name"), }}, } } @@ -449,24 +449,6 @@ func GetSelectorsPerEventType(fentry bool) map[eval.EventType][]manager.ProbesSe &manager.BestEffort{Selectors: ExpandSyscallProbesSelector(SecurityAgentUID, "bind", fentry, EntryAndExit)}, }, - // List of probes required to capture DNS events - "dns": { - &manager.AllOf{Selectors: []manager.ProbesSelector{ - &manager.AllOf{Selectors: NetworkSelectors()}, - &manager.AllOf{Selectors: NetworkVethSelectors()}, - kprobeOrFentry("security_socket_bind"), - }}, - }, - - // List of probes required to capture IMDS events - "imds": { - &manager.AllOf{Selectors: []manager.ProbesSelector{ - &manager.AllOf{Selectors: NetworkSelectors()}, - &manager.AllOf{Selectors: NetworkVethSelectors()}, - kprobeOrFentry("security_socket_bind"), - }}, - }, - // List of probes required to capture chdir events "chdir": { &manager.AllOf{Selectors: []manager.ProbesSelector{ @@ -477,11 +459,25 @@ func GetSelectorsPerEventType(fentry bool) map[eval.EventType][]manager.ProbesSe }, } + // Add probes required to track network interfaces and map network flows to processes + // networkEventTypes: dns, imds, packet + networkEventTypes := model.GetEventTypePerCategory(model.NetworkCategory)[model.NetworkCategory] + for _, networkEventType := range networkEventTypes { + selectorsPerEventTypeStore[networkEventType] = []manager.ProbesSelector{ + &manager.AllOf{Selectors: []manager.ProbesSelector{ + &manager.AllOf{Selectors: NetworkSelectors()}, + &manager.AllOf{Selectors: NetworkVethSelectors()}, + }}, + } + } + // add probes depending on loaded modules loadedModules, err := utils.FetchLoadedModules() if err == nil { if _, ok := loadedModules["nf_nat"]; ok { - selectorsPerEventTypeStore["dns"] = append(selectorsPerEventTypeStore["dns"], NetworkNFNatSelectors()...) + for _, networkEventType := range networkEventTypes { + selectorsPerEventTypeStore[networkEventType] = append(selectorsPerEventTypeStore[networkEventType], NetworkNFNatSelectors()...) 
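+ // e.g. with nf_nat loaded, dns, imds and packet now all get the NetworkNFNatSelectors + // probes, where the previous code only extended the dns selectors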
+ } } } diff --git a/pkg/security/ebpf/probes/net_device.go b/pkg/security/ebpf/probes/net_device.go index e04d85ba0fd74..91f18f0594f5e 100644 --- a/pkg/security/ebpf/probes/net_device.go +++ b/pkg/security/ebpf/probes/net_device.go @@ -48,12 +48,6 @@ func getNetDeviceProbes() []*manager.Probe { EBPFFuncName: "hook___dev_get_by_index", }, }, - { - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - EBPFFuncName: "hook___dev_get_by_name", - }, - }, { ProbeIdentificationPair: manager.ProbeIdentificationPair{ UID: SecurityAgentUID, diff --git a/pkg/security/events/rate_limiter.go b/pkg/security/events/rate_limiter.go index 2c800f00565f1..77d1999349043 100644 --- a/pkg/security/events/rate_limiter.go +++ b/pkg/security/events/rate_limiter.go @@ -35,14 +35,14 @@ const ( ) var ( - defaultPerRuleLimiters = map[eval.RuleID]Limiter{ - RulesetLoadedRuleID: NewStdLimiter(rate.Inf, 1), // No limit on ruleset loaded - HeartbeatRuleID: NewStdLimiter(rate.Inf, 1), // No limit on heartbeat - AbnormalPathRuleID: NewStdLimiter(rate.Every(30*time.Second), 1), - NoProcessContextErrorRuleID: NewStdLimiter(rate.Every(30*time.Second), 1), - BrokenProcessLineageErrorRuleID: NewStdLimiter(rate.Every(30*time.Second), 1), - EBPFLessHelloMessageRuleID: NewStdLimiter(rate.Inf, 1), // No limit on hello message - InternalCoreDumpRuleID: NewStdLimiter(rate.Every(30*time.Second), 1), + defaultPerRuleLimiters = map[eval.RuleID]rate.Limit{ + RulesetLoadedRuleID: rate.Inf, // No limit on ruleset loaded + HeartbeatRuleID: rate.Inf, // No limit on heartbeat + AbnormalPathRuleID: rate.Every(30 * time.Second), + NoProcessContextErrorRuleID: rate.Every(30 * time.Second), + BrokenProcessLineageErrorRuleID: rate.Every(30 * time.Second), + EBPFLessHelloMessageRuleID: rate.Inf, // No limit on hello message + InternalCoreDumpRuleID: rate.Every(30 * time.Second), } ) @@ -72,8 +72,8 @@ func NewRateLimiter(config *config.RuntimeSecurityConfig, client statsd.ClientIn } func (rl *RateLimiter) applyBaseLimitersFromDefault(limiters map[string]Limiter) { - for id, limiter := range defaultPerRuleLimiters { - limiters[id] = limiter + for id, rate := range defaultPerRuleLimiters { + limiters[id] = NewStdLimiter(rate, 1) } limiter, err := NewAnomalyDetectionLimiter(rl.config.AnomalyDetectionRateLimiterNumKeys, rl.config.AnomalyDetectionRateLimiterNumEventsAllowed, rl.config.AnomalyDetectionRateLimiterPeriod) diff --git a/pkg/security/module/grpc.go b/pkg/security/module/grpc.go index 7320713cc9fe6..c2b7f1e6c8be3 100644 --- a/pkg/security/module/grpc.go +++ b/pkg/security/module/grpc.go @@ -19,18 +19,19 @@ import ( // GRPCServer defines a gRPC server type GRPCServer struct { - server *grpc.Server - netListener net.Listener - wg sync.WaitGroup - family string - address string + server *grpc.Server + wg sync.WaitGroup + family string + address string } // NewGRPCServer returns a new gRPC server func NewGRPCServer(family string, address string) *GRPCServer { // force cleanup of a previous socket that was not cleaned up if family == "unix" { - _ = os.Remove(address) + if err := os.Remove(address); err != nil && !os.IsNotExist(err) { + seclog.Errorf("error removing the previous runtime security socket: %v", err) + } } return &GRPCServer{ @@ -53,8 +54,6 @@ func (g *GRPCServer) Start() error { } } - g.netListener = ln - g.wg.Add(1) go func() { defer g.wg.Done() @@ -73,10 +72,9 @@ func (g *GRPCServer) Stop() { g.server.Stop() } - if g.netListener != nil { - g.netListener.Close() - if g.family == "unix" { - _ =
os.Remove(g.address) + if g.family == "unix" { + if err := os.Remove(g.address); err != nil && !os.IsNotExist(err) { + seclog.Errorf("error removing the runtime security socket: %v", err) } } diff --git a/pkg/security/probe/actions.go b/pkg/security/probe/actions.go index 4f1ac2a46621e..3e53dcc77c6a8 100644 --- a/pkg/security/probe/actions.go +++ b/pkg/security/probe/actions.go @@ -17,19 +17,31 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/utils" ) +// KillActionStatus defines the status of a kill action +type KillActionStatus string + +const ( + // KillActionStatusPerformed indicates the kill action was performed + KillActionStatusPerformed KillActionStatus = "performed" + // KillActionStatusRuleDisarmed indicates the kill action was skipped because the rule was disarmed + KillActionStatusRuleDisarmed KillActionStatus = "rule_disarmed" ) + // KillActionReport defines a kill action report type KillActionReport struct { sync.RWMutex - Signal string - Scope string - Pid uint32 - CreatedAt time.Time - DetectedAt time.Time - KilledAt time.Time - ExitedAt time.Time + Signal string + Scope string + Status KillActionStatus + CreatedAt time.Time + DetectedAt time.Time + KilledAt time.Time + ExitedAt time.Time + DisarmerType string // internal + Pid uint32 resolved bool rule *rules.Rule } @@ -37,14 +49,16 @@ type KillActionReport struct { // JKillActionReport used to serialize data // easyjson:json type JKillActionReport struct { - Type string `json:"type"` - Signal string `json:"signal"` - Scope string `json:"scope"` - CreatedAt utils.EasyjsonTime `json:"created_at"` - DetectedAt utils.EasyjsonTime `json:"detected_at"` - KilledAt utils.EasyjsonTime `json:"killed_at"` - ExitedAt *utils.EasyjsonTime `json:"exited_at,omitempty"` - TTR string `json:"ttr,omitempty"` + Type string `json:"type"` + Signal string `json:"signal"` + Scope string `json:"scope"` + Status string `json:"status"` + DisarmerType string `json:"disarmer_type,omitempty"` + CreatedAt utils.EasyjsonTime `json:"created_at"` + DetectedAt utils.EasyjsonTime `json:"detected_at"` + KilledAt *utils.EasyjsonTime `json:"killed_at,omitempty"` + ExitedAt *utils.EasyjsonTime `json:"exited_at,omitempty"` + TTR string `json:"ttr,omitempty"` } // IsResolved returns whether the action is resolved @@ -53,7 +67,7 @@ func (k *KillActionReport) IsResolved() bool { defer k.RUnlock() // for sigkill wait for exit - return k.Signal != "SIGKILL" || k.resolved + return k.Signal != "SIGKILL" || k.resolved || k.Status == KillActionStatusRuleDisarmed } // ToJSON marshals the action @@ -62,13 +76,15 @@ func (k *KillActionReport) ToJSON() ([]byte, error) { defer k.RUnlock() jk := JKillActionReport{ - Type: rules.KillAction, - Signal: k.Signal, - Scope: k.Scope, - CreatedAt: utils.NewEasyjsonTime(k.CreatedAt), - DetectedAt: utils.NewEasyjsonTime(k.DetectedAt), - KilledAt: utils.NewEasyjsonTime(k.KilledAt), - ExitedAt: utils.NewEasyjsonTimeIfNotZero(k.ExitedAt), + Type: rules.KillAction, + Signal: k.Signal, + Scope: k.Scope, + Status: string(k.Status), + DisarmerType: k.DisarmerType, + CreatedAt: utils.NewEasyjsonTime(k.CreatedAt), + DetectedAt: utils.NewEasyjsonTime(k.DetectedAt), + KilledAt: utils.NewEasyjsonTimeIfNotZero(k.KilledAt), + ExitedAt: utils.NewEasyjsonTimeIfNotZero(k.ExitedAt), } if !k.ExitedAt.IsZero() { diff --git a/pkg/security/probe/actions_easyjson.go b/pkg/security/probe/actions_easyjson.go index 7229f234dc676..059c4bcf38117 100644 --- a/pkg/security/probe/actions_easyjson.go +++ b/pkg/security/probe/actions_easyjson.go @@ -43,6 +43,10 @@
func easyjsonB97b45a3DecodeGithubComDataDogDatadogAgentPkgSecurityProbe(in *jlex out.Signal = string(in.String()) case "scope": out.Scope = string(in.String()) + case "status": + out.Status = string(in.String()) + case "disarmer_type": + out.DisarmerType = string(in.String()) case "created_at": if data := in.Raw(); in.Ok() { in.AddError((out.CreatedAt).UnmarshalJSON(data)) @@ -52,8 +56,16 @@ func easyjsonB97b45a3DecodeGithubComDataDogDatadogAgentPkgSecurityProbe(in *jlex in.AddError((out.DetectedAt).UnmarshalJSON(data)) } case "killed_at": - if data := in.Raw(); in.Ok() { - in.AddError((out.KilledAt).UnmarshalJSON(data)) + if in.IsNull() { + in.Skip() + out.KilledAt = nil + } else { + if out.KilledAt == nil { + out.KilledAt = new(utils.EasyjsonTime) + } + if data := in.Raw(); in.Ok() { + in.AddError((*out.KilledAt).UnmarshalJSON(data)) + } } case "exited_at": if in.IsNull() { @@ -98,6 +110,16 @@ func easyjsonB97b45a3EncodeGithubComDataDogDatadogAgentPkgSecurityProbe(out *jwr out.RawString(prefix) out.String(string(in.Scope)) } + { + const prefix string = ",\"status\":" + out.RawString(prefix) + out.String(string(in.Status)) + } + if in.DisarmerType != "" { + const prefix string = ",\"disarmer_type\":" + out.RawString(prefix) + out.String(string(in.DisarmerType)) + } { const prefix string = ",\"created_at\":" out.RawString(prefix) @@ -108,10 +130,10 @@ func easyjsonB97b45a3EncodeGithubComDataDogDatadogAgentPkgSecurityProbe(out *jwr out.RawString(prefix) (in.DetectedAt).MarshalEasyJSON(out) } - { + if in.KilledAt != nil { const prefix string = ",\"killed_at\":" out.RawString(prefix) - (in.KilledAt).MarshalEasyJSON(out) + (*in.KilledAt).MarshalEasyJSON(out) } if in.ExitedAt != nil { const prefix string = ",\"exited_at\":" diff --git a/pkg/security/probe/constantfetch/available.go b/pkg/security/probe/constantfetch/available.go index 2e498c5a72ff7..bdbb9edcf4620 100644 --- a/pkg/security/probe/constantfetch/available.go +++ b/pkg/security/probe/constantfetch/available.go @@ -10,6 +10,7 @@ package constantfetch import ( "errors" + "fmt" "github.com/DataDog/datadog-go/v5/statsd" "github.com/cilium/ebpf/btf" @@ -49,21 +50,30 @@ func GetAvailableConstantFetchers(config *config.Config, kv *kernel.Version, sta return fetchers } -// GetHasUsernamespaceFirstArgWithBtf uses BTF to check if the security_inode_setattr function has a user namespace as its first argument -func GetHasUsernamespaceFirstArgWithBtf() (bool, error) { +func getBTFFuncProto(funcName string) (*btf.FuncProto, error) { spec, err := pkgebpf.GetKernelSpec() if err != nil { - return false, err + return nil, err } var function *btf.Func - if err := spec.TypeByName("security_inode_setattr", &function); err != nil { - return false, err + if err := spec.TypeByName(funcName, &function); err != nil { + return nil, err } proto, ok := function.Type.(*btf.FuncProto) if !ok { - return false, errors.New("security_inode_setattr has no prototype") + return nil, fmt.Errorf("%s has no prototype", funcName) + } + + return proto, nil +} + +// GetHasUsernamespaceFirstArgWithBtf uses BTF to check if the security_inode_setattr function has a user namespace as its first argument +func GetHasUsernamespaceFirstArgWithBtf() (bool, error) { + proto, err := getBTFFuncProto("security_inode_setattr") + if err != nil { + return false, err } if len(proto.Params) == 0 { @@ -72,3 +82,21 @@ func GetHasUsernamespaceFirstArgWithBtf() (bool, error) { return proto.Params[0].Name != "dentry", nil } + +// GetHasVFSRenameStructArgs uses BTF to check if the vfs_rename 
function has a struct renamedata as its only argument +func GetHasVFSRenameStructArgs() (bool, error) { + proto, err := getBTFFuncProto("vfs_rename") + if err != nil { + return false, err + } + + if len(proto.Params) == 0 { + return false, errors.New("vfs_rename has no parameters") + } + + if len(proto.Params) == 1 && proto.Params[0].Name == "rd" { + return true, nil + } + + return false, nil +} diff --git a/pkg/security/probe/constantfetch/available_unsupported.go b/pkg/security/probe/constantfetch/available_unsupported.go index b23f04bf8acaf..3e003cbc28601 100644 --- a/pkg/security/probe/constantfetch/available_unsupported.go +++ b/pkg/security/probe/constantfetch/available_unsupported.go @@ -35,7 +35,12 @@ func GetAvailableConstantFetchers(_ *config.Config, kv *kernel.Version, _ statsd return fetchers } -// GetHasUsernamespaceFirstArgWithBtf uses BTF to check if the security_inode_setattr function has a user namespace as its first argument +// GetHasUsernamespaceFirstArgWithBtf not available func GetHasUsernamespaceFirstArgWithBtf() (bool, error) { return false, errors.New("unsupported BTF request") } + +// GetHasVFSRenameStructArgs not available +func GetHasVFSRenameStructArgs() (bool, error) { + return false, errors.New("unsupported BTF request") +} diff --git a/pkg/security/probe/constantfetch/btfhub/constants.json b/pkg/security/probe/constantfetch/btfhub/constants.json index 4e70d557da035..192841c9f4c6a 100644 --- a/pkg/security/probe/constantfetch/btfhub/constants.json +++ b/pkg/security/probe/constantfetch/btfhub/constants.json @@ -11869,6 +11869,13 @@ "uname_release": "4.14.352-268.568.amzn2.aarch64", "cindex": 3 }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.352-268.569.amzn2.aarch64", + "cindex": 3 + }, { "distrib": "amzn", "version": "2", @@ -12681,6 +12688,13 @@ "uname_release": "4.14.352-268.568.amzn2.x86_64", "cindex": 8 }, + { + "distrib": "amzn", + "version": "2", + "arch": "x86_64", + "uname_release": "4.14.352-268.569.amzn2.x86_64", + "cindex": 8 + }, { "distrib": "amzn", "version": "2", @@ -18288,6 +18302,13 @@ "uname_release": "4.14.35-2047.540.4.2.el7uek.aarch64", "cindex": 89 }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.541.4.1.el7uek.aarch64", + "cindex": 89 + }, { "distrib": "ol", "version": "7", @@ -23783,6 +23804,20 @@ "uname_release": "4.14.35-2047.541.3.el7uek.x86_64", "cindex": 96 }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.541.4.1.el7uek.x86_64", + "cindex": 96 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.542.1.el7uek.x86_64", + "cindex": 96 + }, { "distrib": "ol", "version": "7", diff --git a/pkg/security/probe/constantfetch/constant_names.go b/pkg/security/probe/constantfetch/constant_names.go index b97e9de3d8e21..764d294840e07 100644 --- a/pkg/security/probe/constantfetch/constant_names.go +++ b/pkg/security/probe/constantfetch/constant_names.go @@ -30,6 +30,10 @@ const ( OffsetNameFileFpath = "file_f_path_offset" OffsetNameMountMntID = "mount_id_offset" + // rename + OffsetNameRenameStructOldDentry = "vfs_rename_src_dentry_offset" + OffsetNameRenameStructNewDentry = "vfs_rename_target_dentry_offset" + // tracepoints OffsetNameSchedProcessForkParentPid = "sched_process_fork_parent_pid_offset" OffsetNameSchedProcessForkChildPid = "sched_process_fork_child_pid_offset" diff --git a/pkg/security/probe/constantfetch/fallback.go 
b/pkg/security/probe/constantfetch/fallback.go index 4f5eeebca0a52..468a5f54c84d4 100644 --- a/pkg/security/probe/constantfetch/fallback.go +++ b/pkg/security/probe/constantfetch/fallback.go @@ -141,6 +141,10 @@ func (f *FallbackConstantFetcher) appendRequest(id string) { value = getFileFpathOffset(f.kernelVersion) case OffsetNameMountMntID: value = getMountIDOffset(f.kernelVersion) + case OffsetNameRenameStructOldDentry: + value = getRenameStructOldDentryOffset(f.kernelVersion) + case OffsetNameRenameStructNewDentry: + value = getRenameStructNewDentryOffset(f.kernelVersion) } f.res[id] = value } @@ -1023,3 +1027,11 @@ func getMountIDOffset(kv *kernel.Version) uint64 { func getNetDeviceNameOffset(_ *kernel.Version) uint64 { return 0 } + +func getRenameStructOldDentryOffset(_ *kernel.Version) uint64 { + return 16 +} + +func getRenameStructNewDentryOffset(_ *kernel.Version) uint64 { + return 40 +} diff --git a/pkg/security/probe/probe_ebpf.go b/pkg/security/probe/probe_ebpf.go index 7ea35bdc91802..38b904195e735 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -545,7 +545,7 @@ func (p *EBPFProbe) unmarshalContexts(data []byte, event *model.Event) (int, err } func eventWithNoProcessContext(eventType model.EventType) bool { - return eventType == model.DNSEventType || eventType == model.IMDSEventType || eventType == model.LoadModuleEventType || eventType == model.UnloadModuleEventType + return eventType == model.DNSEventType || eventType == model.IMDSEventType || eventType == model.RawPacketEventType || eventType == model.LoadModuleEventType || eventType == model.UnloadModuleEventType } func (p *EBPFProbe) unmarshalProcessCacheEntry(ev *model.Event, data []byte) (int, error) { @@ -1255,11 +1255,13 @@ func (p *EBPFProbe) isNeededForSecurityProfile(eventType eval.EventType) bool { } func (p *EBPFProbe) validEventTypeForConfig(eventType string) bool { - if eventType == "dns" && !p.config.Probe.NetworkEnabled { - return false - } - if eventType == "imds" && (!p.config.Probe.NetworkEnabled || !p.config.Probe.NetworkIngressEnabled) { - return false + switch eventType { + case "dns": + return p.probe.IsNetworkEnabled() + case "imds": + return p.probe.IsNetworkEnabled() && p.config.Probe.NetworkIngressEnabled + case "packet": + return p.probe.IsNetworkRawPacketEnabled() } return true } @@ -1875,7 +1877,7 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts, telemetry tele }, manager.ConstantEditor{ Name: "vfs_rename_input_type", - Value: mount.GetVFSRenameInputType(p.kernelVersion), + Value: getVFSRenameRegisterArgsOrStruct(p.kernelVersion), }, manager.ConstantEditor{ Name: "check_helper_call_input", @@ -2074,6 +2076,24 @@ func getHasUsernamespaceFirstArg(kernelVersion *kernel.Version) uint64 { } } +func getVFSRenameRegisterArgsOrStruct(kernelVersion *kernel.Version) uint64 { + if val, err := constantfetch.GetHasVFSRenameStructArgs(); err == nil { + if val { + return 2 + } + return 1 + } + + if kernelVersion.Code >= kernel.Kernel5_12 { + return 2 + } + if kernelVersion.IsInRangeCloseOpen(kernel.Kernel5_10, kernel.Kernel5_11) && kernelVersion.Code.Patch() >= 220 { + return 2 + } + + return 1 +} + func getOvlPathInOvlInode(kernelVersion *kernel.Version) uint64 { // https://github.com/torvalds/linux/commit/0af950f57fefabab628f1963af881e6b9bfe7f38 if kernelVersion.Code != 0 && kernelVersion.Code >= kernel.Kernel6_5 { @@ -2155,6 +2175,12 @@ func AppendProbeRequestsToFetcher(constantFetcher constantfetch.ConstantFetcher, 
constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameKernelCloneArgsExitSignal, "struct kernel_clone_args", "exit_signal", "linux/sched/task.h") } + // rename offsets + if kv.Code >= kernel.Kernel5_12 || (kv.IsInRangeCloseOpen(kernel.Kernel5_10, kernel.Kernel5_11) && kv.Code.Patch() >= 220) { + constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameRenameStructOldDentry, "struct renamedata", "old_dentry", "linux/fs.h") + constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameRenameStructNewDentry, "struct renamedata", "new_dentry", "linux/fs.h") + } + // bpf offsets constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameBPFMapStructID, "struct bpf_map", "id", "linux/bpf.h") if kv.Code != 0 && (kv.Code >= kernel.Kernel4_15 || kv.IsRH7Kernel()) { diff --git a/pkg/security/probe/process_killer.go b/pkg/security/probe/process_killer.go index 4e61f1d147418..72912286c76f5 100644 --- a/pkg/security/probe/process_killer.go +++ b/pkg/security/probe/process_killer.go @@ -174,6 +174,12 @@ func (p *ProcessKiller) KillAndReport(kill *rules.KillDefinition, rule *rules.Ru return false } + scope := "process" + switch kill.Scope { + case "container", "process": + scope = kill.Scope + } + if p.useDisarmers.Load() { var disarmer *ruleDisarmer p.ruleDisarmersLock.Lock() @@ -184,13 +190,27 @@ func (p *ProcessKiller) KillAndReport(kill *rules.KillDefinition, rule *rules.Ru } p.ruleDisarmersLock.Unlock() + onActionBlockedByDisarmer := func(dt disarmerType) { + seclog.Warnf("skipping kill action of rule `%s` because it has been disarmed", rule.ID) + ev.ActionReports = append(ev.ActionReports, &KillActionReport{ + Scope: scope, + Signal: kill.Signal, + Status: KillActionStatusRuleDisarmed, + DisarmerType: string(dt), + CreatedAt: ev.ProcessContext.ExecTime, + DetectedAt: ev.ResolveEventTime(), + Pid: ev.ProcessContext.Pid, + rule: rule, + }) + } + if disarmer.container.enabled { if containerID := ev.FieldHandlers.ResolveContainerID(ev, ev.ContainerContext); containerID != "" { if !disarmer.allow(disarmer.containerCache, containerID, func() { disarmer.disarmedCount[containerDisarmerType]++ seclog.Warnf("disarming kill action of rule `%s` because more than %d different containers triggered it in the last %s", rule.ID, disarmer.container.capacity, disarmer.container.period) }) { - seclog.Warnf("skipping kill action of rule `%s` because it has been disarmed", rule.ID) + onActionBlockedByDisarmer(containerDisarmerType) return false } } @@ -202,18 +222,12 @@ func (p *ProcessKiller) KillAndReport(kill *rules.KillDefinition, rule *rules.Ru disarmer.disarmedCount[executableDisarmerType]++ seclog.Warnf("disarmed kill action of rule `%s` because more than %d different executables triggered it in the last %s", rule.ID, disarmer.executable.capacity, disarmer.executable.period) }) { - seclog.Warnf("skipping kill action of rule `%s` because it has been disarmed", rule.ID) + onActionBlockedByDisarmer(executableDisarmerType) return false } } } - scope := "process" - switch kill.Scope { - case "container", "process": - scope = kill.Scope - } - pids, paths, err := p.getProcesses(scope, ev, entry) if err != nil { log.Errorf("unable to kill: %s", err) @@ -255,10 +269,11 @@ func (p *ProcessKiller) KillAndReport(kill *rules.KillDefinition, rule *rules.Ru report := &KillActionReport{ Scope: scope, Signal: kill.Signal, - Pid: ev.ProcessContext.Pid, + Status: KillActionStatusPerformed, CreatedAt: ev.ProcessContext.ExecTime, DetectedAt: ev.ResolveEventTime(), KilledAt: killedAt, + Pid: ev.ProcessContext.Pid, 
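+ // unlike the disarmed report above, this one is marked as performed and + // carries a KilledAt timestamp; DisarmerType stays empty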
rule: rule, } ev.ActionReports = append(ev.ActionReports, report) @@ -440,8 +455,8 @@ const ( type disarmerType string const ( - containerDisarmerType = disarmerType("container") - executableDisarmerType = disarmerType("executable") + containerDisarmerType disarmerType = "container" + executableDisarmerType disarmerType = "executable" ) type ruleDisarmer struct { diff --git a/pkg/security/ptracer/utils.go b/pkg/security/ptracer/utils.go index 72fc4f589750f..f313d02415800 100644 --- a/pkg/security/ptracer/utils.go +++ b/pkg/security/ptracer/utils.go @@ -132,7 +132,7 @@ func simpleHTTPRequest(uri string) ([]byte, error) { path = "/" } - req := fmt.Sprintf("GET %s?%s HTTP/1.1\nHost: %s\nConnection: close\n\n", path, u.RawQuery, u.Hostname()) + req := fmt.Sprintf("GET %s?%s HTTP/1.0\nHost: %s\nConnection: close\n\n", path, u.RawQuery, u.Hostname()) _, err = client.Write([]byte(req)) if err != nil { diff --git a/pkg/security/resolvers/mount/resolver.go b/pkg/security/resolvers/mount/resolver.go index 429b9ed0ca6d6..c35daaa588e88 100644 --- a/pkg/security/resolvers/mount/resolver.go +++ b/pkg/security/resolvers/mount/resolver.go @@ -571,17 +571,6 @@ func GetVFSRemovexattrDentryPosition(kernelVersion *skernel.Version) uint64 { return position } -// GetVFSRenameInputType gets VFS rename input type -func GetVFSRenameInputType(kernelVersion *skernel.Version) uint64 { - inputType := uint64(1) - - if kernelVersion.Code != 0 && kernelVersion.Code >= skernel.Kernel5_12 { - inputType = 2 - } - - return inputType -} - // SendStats sends metrics about the current state of the mount resolver func (mr *Resolver) SendStats() error { mr.lock.RLock() diff --git a/pkg/security/resolvers/sbom/file_querier.go b/pkg/security/resolvers/sbom/file_querier.go index 1009c076e0ebe..e1831cfbeaf05 100644 --- a/pkg/security/resolvers/sbom/file_querier.go +++ b/pkg/security/resolvers/sbom/file_querier.go @@ -9,6 +9,7 @@ package sbom import ( + "slices" "strings" "github.com/DataDog/datadog-agent/pkg/security/seclog" @@ -19,6 +20,8 @@ import ( type fileQuerier struct { files []uint64 pkgs []*Package + + lastNegativeCache *fixedSizeQueue[uint64] } /* @@ -64,10 +67,15 @@ func newFileQuerier(report *trivy.Report) fileQuerier { } } - return fileQuerier{files: files, pkgs: pkgs} + return fileQuerier{files: files, pkgs: pkgs, lastNegativeCache: newFixedSizeQueue[uint64](2)} } func (fq *fileQuerier) queryHash(hash uint64) *Package { + // fast path, if no package in the report contains the file + if !slices.Contains(fq.files, hash) { + return nil + } + var i, pkgIndex uint64 for i < uint64(len(fq.files)) { partSize := fq.files[i] @@ -85,13 +93,29 @@ func (fq *fileQuerier) queryHash(hash uint64) *Package { return nil } +func (fq *fileQuerier) queryHashWithNegativeCache(hash uint64) *Package { + if fq.lastNegativeCache.contains(hash) { + return nil + } + + pkg := fq.queryHash(hash) + if pkg == nil { + if fq.lastNegativeCache == nil { + fq.lastNegativeCache = newFixedSizeQueue[uint64](2) + } + fq.lastNegativeCache.push(hash) + } + + return pkg +} + func (fq *fileQuerier) queryFile(path string) *Package { - if pkg := fq.queryHash(murmur3.StringSum64(path)); pkg != nil { + if pkg := fq.queryHashWithNegativeCache(murmur3.StringSum64(path)); pkg != nil { return pkg } if strings.HasPrefix(path, "/usr") { - return fq.queryHash(murmur3.StringSum64(path[4:])) + return fq.queryHashWithNegativeCache(murmur3.StringSum64(path[4:])) } return nil @@ -100,3 +124,34 @@ func (fq *fileQuerier) queryFile(path string) *Package { func (fq *fileQuerier) 
len() int { return len(fq.files) } + +type fixedSizeQueue[T comparable] struct { + queue []T + maxSize int +} + +func newFixedSizeQueue[T comparable](maxSize int) *fixedSizeQueue[T] { + return &fixedSizeQueue[T]{maxSize: maxSize} +} + +func (q *fixedSizeQueue[T]) push(value T) { + if len(q.queue) == q.maxSize { + q.queue = q.queue[1:] + } + + q.queue = append(q.queue, value) +} + +func (q *fixedSizeQueue[T]) contains(value T) bool { + if q == nil { + return false + } + + for _, v := range q.queue { + if v == value { + return true + } + } + + return false +} diff --git a/pkg/security/secl/compiler/eval/context.go b/pkg/security/secl/compiler/eval/context.go index 874cafa9d72d1..f8775862623be 100644 --- a/pkg/security/secl/compiler/eval/context.go +++ b/pkg/security/secl/compiler/eval/context.go @@ -11,6 +11,12 @@ import ( "time" ) +// RegisterCacheEntry used to track an iterator value and its position +type RegisterCacheEntry struct { + Pos int + Value interface{} +} + // Context describes the context used during a rule evaluation type Context struct { Event Event @@ -20,6 +26,12 @@ type Context struct { IntCache map[string][]int BoolCache map[string][]bool + // iterator register cache, used to cache entries within a single rule evaluation + RegisterCache map[RegisterID]*RegisterCacheEntry + + // rule register + Registers map[RegisterID]int + now time.Time } @@ -41,25 +53,22 @@ func (c *Context) Reset() { c.Event = nil c.now = time.Time{} - // as the cache should be low in entry, prefer to delete than re-alloc - for key := range c.StringCache { - delete(c.StringCache, key) - } - for key := range c.IntCache { - delete(c.IntCache, key) - } - for key := range c.BoolCache { - delete(c.BoolCache, key) - } + clear(c.StringCache) + clear(c.IntCache) + clear(c.BoolCache) + clear(c.Registers) + clear(c.RegisterCache) } // NewContext returns a new Context func NewContext(evt Event) *Context { return &Context{ - Event: evt, - StringCache: make(map[string][]string), - IntCache: make(map[string][]int), - BoolCache: make(map[string][]bool), + Event: evt, + StringCache: make(map[string][]string), + IntCache: make(map[string][]int), + BoolCache: make(map[string][]bool), + Registers: make(map[RegisterID]int), + RegisterCache: make(map[RegisterID]*RegisterCacheEntry), + } } diff --git a/pkg/security/secl/compiler/eval/errors.go b/pkg/security/secl/compiler/eval/errors.go index 3c76bf0c86f9f..1be678c4b9fad 100644 --- a/pkg/security/secl/compiler/eval/errors.go +++ b/pkg/security/secl/compiler/eval/errors.go @@ -177,3 +177,12 @@ type ErrValueOutOfRange struct { } func (e ErrValueOutOfRange) Error() string { return fmt.Sprintf("incorrect value for type `%s`, out of range", e.Field) } + +// ErrIteratorVariable error returned when an iterator variable constraint is violated +type ErrIteratorVariable struct { + Err error +} + +func (e ErrIteratorVariable) Error() string { + return fmt.Sprintf("iterator variable error: %s", e.Err) +} diff --git a/pkg/security/secl/compiler/eval/eval.go b/pkg/security/secl/compiler/eval/eval.go index cf2ccab373ad3..f417cbd82ee05 100644 --- a/pkg/security/secl/compiler/eval/eval.go +++ b/pkg/security/secl/compiler/eval/eval.go @@ -12,6 +12,7 @@ import ( "fmt" "reflect" "regexp" + "slices" "strconv" "strings" @@ -75,7 +76,7 @@ func identToEvaluator(obj *ident, opts *Opts, state *State) (interface{}, lexer.
} } - field, _, regID, err := extractField(*obj.Ident, state) + field, itField, regID, err := extractField(*obj.Ident, state) if err != nil { return nil, obj.Pos, err } @@ -94,6 +95,26 @@ func identToEvaluator(obj *ident, opts *Opts, state *State) (interface{}, lexer. state.UpdateFields(field) + if regID != "" { + // avoid wildcard register for the moment + if regID == "_" { + return nil, obj.Pos, NewError(obj.Pos, "`_` can't be used as an iterator variable name") + } + + // avoid using the same register on two different fields + if slices.ContainsFunc(state.registers, func(r Register) bool { + return r.ID == regID && r.Field != itField + }) { + return nil, obj.Pos, NewError(obj.Pos, "iterator variable '%s' used on different fields", regID) + } + + if !slices.ContainsFunc(state.registers, func(r Register) bool { + return r.ID == regID + }) { + state.registers = append(state.registers, Register{ID: regID, Field: itField}) + } + } + return accessor, obj.Pos, nil } diff --git a/pkg/security/secl/compiler/eval/eval_test.go b/pkg/security/secl/compiler/eval/eval_test.go index f99e6df8816b5..86dd87126133d 100644 --- a/pkg/security/secl/compiler/eval/eval_test.go +++ b/pkg/security/secl/compiler/eval/eval_test.go @@ -817,13 +817,15 @@ func TestRegisterSyntaxError(t *testing.T) { Expr string Expected bool }{ - {Expr: `process.list[_].key == 10 && process.list[_].value == "AAA"`, Expected: true}, + {Expr: `process.list[_].key == 10 && process.list[_].value == "AAA"`, Expected: false}, + {Expr: `process.list[A].key == 10 && process.list[A].value == "AAA"`, Expected: true}, + {Expr: `process.list[A].key == 10 && process.list[B].value == "AAA"`, Expected: false}, + {Expr: `process.list[A].key == 10 && process.array[A].value == "AAA"`, Expected: false}, {Expr: `process.list[].key == 10 && process.list.value == "AAA"`, Expected: false}, - {Expr: `process.list[_].key == 10 && process.list.value == "AAA"`, Expected: true}, + {Expr: `process.list[A].key == 10 && process.list.value == "AAA"`, Expected: true}, {Expr: `process.list.key[] == 10 && process.list.value == "AAA"`, Expected: false}, {Expr: `process[].list.key == 10 && process.list.value == "AAA"`, Expected: false}, {Expr: `[]process.list.key == 10 && process.list.value == "AAA"`, Expected: false}, - //{Expr: `process.list[A].key == 10 && process.list[A].value == "AAA" && process.array[B].key == 10 && process.array[B].value == "AAA"`, Expected: false}, } for _, test := range tests { @@ -853,85 +855,90 @@ func TestRegister(t *testing.T) { Expr string Expected bool }{ - {Expr: `process.list[_].key == 10`, Expected: true}, - {Expr: `process.list[_].key == 9999`, Expected: false}, - {Expr: `process.list[_].key != 10`, Expected: false}, - {Expr: `process.list[_].key != 9999`, Expected: true}, - - {Expr: `process.list[_].key >= 200`, Expected: true}, - {Expr: `process.list[_].key > 100`, Expected: true}, - {Expr: `process.list[_].key <= 200`, Expected: true}, - {Expr: `process.list[_].key < 100`, Expected: true}, - - {Expr: `10 == process.list[_].key`, Expected: true}, - {Expr: `9999 == process.list[_].key`, Expected: false}, - {Expr: `10 != process.list[_].key`, Expected: false}, - {Expr: `9999 != process.list[_].key`, Expected: true}, - - {Expr: `9999 in process.list[_].key`, Expected: false}, - {Expr: `9999 not in process.list[_].key`, Expected: true}, - {Expr: `10 in process.list[_].key`, Expected: true}, - {Expr: `10 not in process.list[_].key`, Expected: false}, - - {Expr: `process.list[_].key > 10`, Expected: true}, - {Expr: `process.list[_].key >
9999`, Expected: false}, - {Expr: `process.list[_].key < 10`, Expected: false}, - {Expr: `process.list[_].key < 9999`, Expected: true}, - - {Expr: `5 < process.list[_].key`, Expected: true}, - {Expr: `9999 < process.list[_].key`, Expected: false}, - {Expr: `10 > process.list[_].key`, Expected: false}, - {Expr: `9999 > process.list[_].key`, Expected: true}, - - {Expr: `true in process.array[_].flag`, Expected: true}, - {Expr: `false not in process.array[_].flag`, Expected: false}, - - {Expr: `process.array[_].flag == true`, Expected: true}, - {Expr: `process.array[_].flag != false`, Expected: false}, - - {Expr: `"AAA" in process.list[_].value`, Expected: true}, - {Expr: `"ZZZ" in process.list[_].value`, Expected: false}, - {Expr: `"AAA" not in process.list[_].value`, Expected: false}, - {Expr: `"ZZZ" not in process.list[_].value`, Expected: true}, - - {Expr: `~"AA*" in process.list[_].value`, Expected: true}, - {Expr: `~"ZZ*" in process.list[_].value`, Expected: false}, - {Expr: `~"AA*" not in process.list[_].value`, Expected: false}, - {Expr: `~"ZZ*" not in process.list[_].value`, Expected: true}, - - {Expr: `r"[A]{1,3}" in process.list[_].value`, Expected: true}, - {Expr: `process.list[_].value in [r"[A]{1,3}", "nnnnn"]`, Expected: true}, - - {Expr: `process.list[_].value == ~"AA*"`, Expected: true}, - {Expr: `process.list[_].value == ~"ZZ*"`, Expected: false}, - {Expr: `process.list[_].value != ~"AA*"`, Expected: false}, - {Expr: `process.list[_].value != ~"ZZ*"`, Expected: true}, - - {Expr: `process.list[_].value =~ "AA*"`, Expected: true}, - {Expr: `process.list[_].value =~ "ZZ*"`, Expected: false}, - {Expr: `process.list[_].value !~ "AA*"`, Expected: false}, - {Expr: `process.list[_].value !~ "ZZ*"`, Expected: true}, - - {Expr: `process.list[_].value in ["~zzzz", ~"AA*", "nnnnn"]`, Expected: true}, - {Expr: `process.list[_].value in ["~zzzz", ~"AA*", "nnnnn"]`, Expected: true}, - {Expr: `process.list[_].value in ["~zzzz", "AAA", "nnnnn"]`, Expected: true}, - {Expr: `process.list[_].value in ["~zzzz", "AA*", "nnnnn"]`, Expected: false}, - - {Expr: `process.list[_].value in [~"ZZ*", "nnnnn"]`, Expected: false}, - {Expr: `process.list[_].value not in [~"AA*", "nnnnn"]`, Expected: false}, - {Expr: `process.list[_].value not in [~"ZZ*", "nnnnn"]`, Expected: true}, - {Expr: `process.list[_].value not in [~"ZZ*", "AAA", "nnnnn"]`, Expected: false}, - {Expr: `process.list[_].value not in [~"ZZ*", ~"AA*", "nnnnn"]`, Expected: false}, - - {Expr: `process.list[_].key == 10 && process.list[_].value == "AAA"`, Expected: true}, - {Expr: `process.list[_].key == 9999 && process.list[_].value == "AAA"`, Expected: false}, - {Expr: `process.list[_].key == 100 && process.list[_].value == "BBB"`, Expected: true}, - {Expr: `process.list[_].key == 200 && process.list[_].value == "CCC"`, Expected: true}, + {Expr: `process.list[A].key == 10`, Expected: true}, + {Expr: `process.list[A].key == 9999`, Expected: false}, + {Expr: `process.list[A].key != 10`, Expected: true}, + {Expr: `process.list.key != 10`, Expected: false}, + {Expr: `process.list[A].key != 9999`, Expected: true}, + {Expr: `process.list[A].key >= 200`, Expected: true}, + {Expr: `process.list[A].key > 100`, Expected: true}, + {Expr: `process.list[A].key <= 200`, Expected: true}, + {Expr: `process.list[A].key < 100`, Expected: true}, + + {Expr: `10 == process.list[A].key`, Expected: true}, + {Expr: `9999 == process.list[A].key`, Expected: false}, + {Expr: `10 != process.list[A].key`, Expected: true}, + {Expr: `9999 != process.list[A].key`, 
Expected: true}, + + {Expr: `9999 in process.list[A].key`, Expected: false}, + {Expr: `9999 not in process.list[A].key`, Expected: true}, + {Expr: `10 in process.list[A].key`, Expected: true}, + {Expr: `10 not in process.list[A].key`, Expected: true}, + + {Expr: `process.list[A].key > 10`, Expected: true}, + {Expr: `process.list[A].key > 9999`, Expected: false}, + {Expr: `process.list[A].key < 10`, Expected: false}, + {Expr: `process.list[A].key < 9999`, Expected: true}, + + {Expr: `5 < process.list[A].key`, Expected: true}, + {Expr: `9999 < process.list[A].key`, Expected: false}, + {Expr: `10 > process.list[A].key`, Expected: false}, + {Expr: `9999 > process.list[A].key`, Expected: true}, + + {Expr: `true in process.array[A].flag`, Expected: true}, + {Expr: `false not in process.array[A].flag`, Expected: false}, + + {Expr: `process.array[A].flag == true`, Expected: true}, + {Expr: `process.array[A].flag != false`, Expected: false}, + + {Expr: `"AAA" in process.list[A].value`, Expected: true}, + {Expr: `"ZZZ" in process.list[A].value`, Expected: false}, + {Expr: `"AAA" not in process.list[A].value`, Expected: true}, + {Expr: `"ZZZ" not in process.list[A].value`, Expected: true}, + + {Expr: `~"AA*" in process.list[A].value`, Expected: true}, + {Expr: `~"ZZ*" in process.list[A].value`, Expected: false}, + {Expr: `~"AA*" not in process.list[A].value`, Expected: true}, + {Expr: `~"ZZ*" not in process.list[A].value`, Expected: true}, + + {Expr: `r"[A]{1,3}" in process.list[A].value`, Expected: true}, + {Expr: `process.list[A].value in [r"[A]{1,3}", "nnnnn"]`, Expected: true}, + + {Expr: `process.list[A].value == ~"AA*"`, Expected: true}, + {Expr: `process.list[A].value == ~"ZZ*"`, Expected: false}, + {Expr: `process.list[A].value != ~"AA*"`, Expected: true}, + {Expr: `process.list[A].value != ~"ZZ*"`, Expected: true}, + + {Expr: `process.list[A].value =~ "AA*"`, Expected: true}, + {Expr: `process.list[A].value =~ "ZZ*"`, Expected: false}, + {Expr: `process.list[A].value !~ "AA*"`, Expected: true}, + {Expr: `process.list[A].value !~ "ZZ*"`, Expected: true}, + + {Expr: `process.list[A].value in ["~zzzz", ~"AA*", "nnnnn"]`, Expected: true}, + {Expr: `process.list[A].value in ["~zzzz", ~"AA*", "nnnnn"]`, Expected: true}, + {Expr: `process.list[A].value in ["~zzzz", "AAA", "nnnnn"]`, Expected: true}, + {Expr: `process.list[A].value in ["~zzzz", "AA*", "nnnnn"]`, Expected: false}, + + {Expr: `process.list[A].value in [~"ZZ*", "nnnnn"]`, Expected: false}, + {Expr: `process.list[A].value not in [~"AA*", "nnnnn"]`, Expected: true}, + {Expr: `process.list[A].value not in [~"ZZ*", "nnnnn"]`, Expected: true}, + {Expr: `process.list[A].value not in [~"ZZ*", "AAA", "nnnnn"]`, Expected: true}, + {Expr: `process.list[A].value not in [~"ZZ*", ~"AA*", "nnnnn"]`, Expected: true}, + + {Expr: `process.list[A].key == 10 && process.list[A].value == "AAA"`, Expected: true}, + {Expr: `process.list[A].key == 9999 && process.list[A].value == "AAA"`, Expected: false}, + {Expr: `process.list[A].key == 100 && process.list[A].value == "BBB"`, Expected: true}, + {Expr: `process.list[A].key == 200 && process.list[A].value == "CCC"`, Expected: true}, {Expr: `process.list.key == 200 && process.list.value == "AAA"`, Expected: true}, - {Expr: `process.list[_].key == 10 && process.list.value == "AAA"`, Expected: true}, + {Expr: `process.list[A].key == 10 && process.list[A].value == "AAA"`, Expected: true}, + {Expr: `process.list[A].key == 10 && process.list[A].value == "BBB"`, Expected: false}, + {Expr: `process.list[A].key == 100 
&& process.list[A].value == "BBB"`, Expected: true}, + {Expr: `process.list.key == 10 && process.list.value == "BBB"`, Expected: true}, - {Expr: `process.array[_].key == 1000 && process.array[_].value == "EEEE"`, Expected: true}, - {Expr: `process.array[_].key == 1002 && process.array[_].value == "EEEE"`, Expected: true}, + {Expr: `process.array[A].key == 1000 && process.array[A].value == "EEEE"`, Expected: true}, + {Expr: `process.array[A].key == 1002 && process.array[A].value == "EEEE"`, Expected: false}, + + {Expr: `process.array[A].key == 1000`, Expected: true}, } for _, test := range tests { @@ -966,12 +973,11 @@ func TestRegisterPartial(t *testing.T) { Field Field IsDiscarder bool }{ - {Expr: `process.list[_].key == 10 && process.list[_].value == "AA"`, Field: "process.list.key", IsDiscarder: false}, - {Expr: `process.list[_].key == 55 && process.list[_].value == "AA"`, Field: "process.list.key", IsDiscarder: true}, - {Expr: `process.list[_].key == 55 && process.list[_].value == "AA"`, Field: "process.list.value", IsDiscarder: false}, - {Expr: `process.list[_].key == 10 && process.list[_].value == "ZZZ"`, Field: "process.list.value", IsDiscarder: true}, - //{Expr: `process.list[A].key == 10 && process.list[B].value == "ZZZ"`, Field: "process.list.key", IsDiscarder: false}, - //{Expr: `process.list[A].key == 55 && process.list[B].value == "AA"`, Field: "process.list.key", IsDiscarder: true}, + {Expr: `process.list[A].key == 10 && process.list[A].value == "AA"`, Field: "process.list.key", IsDiscarder: false}, + {Expr: `process.list[A].key == 55 && process.list[A].value == "AA"`, Field: "process.list.key", IsDiscarder: true}, + {Expr: `process.list[A].key in [55, 10] && process.list[A].value == "AA"`, Field: "process.list.key", IsDiscarder: false}, + {Expr: `process.list[A].key == 55 && process.list[A].value == "AA"`, Field: "process.list.value", IsDiscarder: false}, + {Expr: `process.list[A].key == 10 && process.list[A].value == "ZZZ"`, Field: "process.list.value", IsDiscarder: true}, } ctx := NewContext(event) @@ -1012,8 +1018,8 @@ func TestOptimizer(t *testing.T) { Expr string Evaluated func() bool }{ - {Expr: `process.list[_].key == 44 && process.gid == 55`, Evaluated: func() bool { return event.listEvaluated }}, - {Expr: `process.gid == 55 && process.list[_].key == 44`, Evaluated: func() bool { return event.listEvaluated }}, + {Expr: `process.list[A].key == 44 && process.gid == 55`, Evaluated: func() bool { return event.listEvaluated }}, + {Expr: `process.gid == 55 && process.list[A].key == 44`, Evaluated: func() bool { return event.listEvaluated }}, {Expr: `process.uid in [66, 77, 88] && process.gid == 55`, Evaluated: func() bool { return event.uidEvaluated }}, {Expr: `process.gid == 55 && process.uid in [66, 77, 88]`, Evaluated: func() bool { return event.uidEvaluated }}, } diff --git a/pkg/security/secl/compiler/eval/model.go b/pkg/security/secl/compiler/eval/model.go index de27aa6422f0b..ae8ea4bf2e790 100644 --- a/pkg/security/secl/compiler/eval/model.go +++ b/pkg/security/secl/compiler/eval/model.go @@ -11,8 +11,6 @@ type Model interface { GetEvaluator(field Field, regID RegisterID) (Evaluator, error) // ValidateField returns whether the value use against the field is valid, ex: for constant ValidateField(field Field, value FieldValue) error - // GetIterator return an iterator - //GetIterator(field Field) (Iterator, error) // NewEvent returns a new event instance NewEvent() Event // GetFieldRestrictions returns the event type for which the field is available diff --git 
a/pkg/security/secl/compiler/eval/model_test.go b/pkg/security/secl/compiler/eval/model_test.go index 337c5190e7ef3..6b8e0ed0f1ba8 100644 --- a/pkg/security/secl/compiler/eval/model_test.go +++ b/pkg/security/secl/compiler/eval/model_test.go @@ -111,7 +111,22 @@ func (m *testModel) GetFieldRestrictions(_ Field) []EventType { return nil } -func (m *testModel) GetEvaluator(field Field, _ RegisterID) (Evaluator, error) { +func (m *testModel) GetIteratorLen(field Field) (func(ctx *Context) int, error) { + switch field { + + case "process.list": + return func(ctx *Context) int { + return ctx.Event.(*testEvent).process.list.Len() + }, nil + case "process.array": + return func(ctx *Context) int { + return len(ctx.Event.(*testEvent).process.array) + }, nil + } + return nil, &ErrFieldNotFound{Field: field} +} + +func (m *testModel) GetEvaluator(field Field, regID RegisterID) (Evaluator, error) { switch field { case "network.ip": @@ -209,8 +224,39 @@ func (m *testModel) GetEvaluator(field Field, _ RegisterID) (Evaluator, error) { Field: field, }, nil + case "process.list.length": + return &IntEvaluator{ + EvalFnc: func(ctx *Context) int { + return ctx.Event.(*testEvent).process.list.Len() + }, + Field: field, + }, nil + case "process.list.key": + if regID != "" { + return &IntArrayEvaluator{ + EvalFnc: func(ctx *Context) []int { + idx := ctx.Registers[regID] + + var i int + + el := ctx.Event.(*testEvent).process.list.Front() + for el != nil { + if i == idx { + return []int{el.Value.(*testItem).key} + } + el = el.Next() + i++ + } + + return nil + }, + Field: field, + Weight: IteratorWeight, + }, nil + } + return &IntArrayEvaluator{ EvalFnc: func(ctx *Context) []int { // to test optimisation @@ -232,6 +278,29 @@ func (m *testModel) GetEvaluator(field Field, _ RegisterID) (Evaluator, error) { case "process.list.value": + if regID != "" { + return &StringArrayEvaluator{ + EvalFnc: func(ctx *Context) []string { + idx := ctx.Registers[regID] + + var i int + + el := ctx.Event.(*testEvent).process.list.Front() + for el != nil { + if i == idx { + return []string{el.Value.(*testItem).value} + } + el = el.Next() + i++ + } + + return nil + }, + Field: field, + Weight: IteratorWeight, + }, nil + } + return &StringArrayEvaluator{ EvalFnc: func(ctx *Context) []string { // to test optimisation @@ -253,6 +322,29 @@ func (m *testModel) GetEvaluator(field Field, _ RegisterID) (Evaluator, error) { case "process.list.flag": + if regID != "" { + return &BoolArrayEvaluator{ + EvalFnc: func(ctx *Context) []bool { + idx := ctx.Registers[regID] + + var i int + + el := ctx.Event.(*testEvent).process.list.Front() + for el != nil { + if i == idx { + return []bool{el.Value.(*testItem).flag} + } + el = el.Next() + i++ + } + + return nil + }, + Field: field, + Weight: IteratorWeight, + }, nil + } + return &BoolArrayEvaluator{ EvalFnc: func(ctx *Context) []bool { // to test optimisation @@ -272,8 +364,28 @@ func (m *testModel) GetEvaluator(field Field, _ RegisterID) (Evaluator, error) { Weight: IteratorWeight, }, nil + case "process.array.length": + return &IntEvaluator{ + EvalFnc: func(ctx *Context) int { + return len(ctx.Event.(*testEvent).process.array) + }, + Field: field, + }, nil + case "process.array.key": + if regID != "" { + return &IntArrayEvaluator{ + EvalFnc: func(ctx *Context) []int { + idx := ctx.Registers[regID] + + return []int{ctx.Event.(*testEvent).process.array[idx].key} + }, + Field: field, + Weight: IteratorWeight, + }, nil + } + return &IntArrayEvaluator{ EvalFnc: func(ctx *Context) []int { var result []int @@ 
-290,6 +402,18 @@ func (m *testModel) GetEvaluator(field Field, _ RegisterID) (Evaluator, error) { case "process.array.value": + if regID != "" { + return &StringArrayEvaluator{ + EvalFnc: func(ctx *Context) []string { + idx := ctx.Registers[regID] + + return []string{ctx.Event.(*testEvent).process.array[idx].value} + }, + Field: field, + Weight: IteratorWeight, + }, nil + } + return &StringArrayEvaluator{ EvalFnc: func(ctx *Context) []string { var values []string diff --git a/pkg/security/secl/compiler/eval/registers.go b/pkg/security/secl/compiler/eval/registers.go index af7adebf69416..71892d02cc498 100644 --- a/pkg/security/secl/compiler/eval/registers.go +++ b/pkg/security/secl/compiler/eval/registers.go @@ -7,3 +7,9 @@ package eval // RegisterID identifies a register ID type RegisterID = string + +// Register defines an eval register +type Register struct { + ID RegisterID + Field Field +} diff --git a/pkg/security/secl/compiler/eval/rule.go b/pkg/security/secl/compiler/eval/rule.go index 14509112fc847..cb130f767676b 100644 --- a/pkg/security/secl/compiler/eval/rule.go +++ b/pkg/security/secl/compiler/eval/rule.go @@ -7,6 +7,7 @@ package eval import ( + "errors" "fmt" "reflect" "slices" @@ -15,6 +16,10 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/utils" ) +const ( + maxRegisterIteration = 100 +) + // RuleID - ID of a Rule type RuleID = string @@ -43,6 +48,8 @@ type RuleEvaluator struct { fields []Field partialEvals map[Field]BoolEvalFnc + + registers []Register } // NewRule returns a new rule @@ -217,11 +224,52 @@ func NewRuleEvaluator(rule *ast.Rule, model Model, opts *Opts) (*RuleEvaluator, } } + // handle rule with iterator registers + if len(state.registers) > 0 { + // NOTE: limit to only one register for now to avoid computation and evaluation + // of all the combinations + if len(state.registers) > 1 { + return nil, &ErrIteratorVariable{Err: errors.New("iterator variables limited to one per rule")} + } + + regID, field := state.registers[0].ID, state.registers[0].Field + lenEval, err := model.GetEvaluator(field+".length", regID) + if err != nil { + return nil, &ErrIteratorVariable{Err: err} + } + + evalBoolFnc := evalBool.EvalFnc + + // eval with each possible value of the registers + evalBool.EvalFnc = func(ctx *Context) bool { + size := lenEval.Eval(ctx).(int) + if size > maxRegisterIteration { + size = maxRegisterIteration + } + + for i := 0; i != size; i++ { + ctx.Registers[regID] = i + if evalBoolFnc(ctx) { + // invalidate the cache + clear(ctx.RegisterCache) + + return true + } + + // invalidate the cache + clear(ctx.RegisterCache) + } + + return false + } + } + return &RuleEvaluator{ Eval: evalBool.EvalFnc, EventType: eventType, fieldValues: state.fieldValues, fields: KeysOfMap(state.fieldValues), + registers: state.registers, }, nil } diff --git a/pkg/security/secl/compiler/eval/state.go b/pkg/security/secl/compiler/eval/state.go index a893419c15706..b50fb42d7fe7c 100644 --- a/pkg/security/secl/compiler/eval/state.go +++ b/pkg/security/secl/compiler/eval/state.go @@ -20,10 +20,10 @@ type StateRegexpCache struct { type State struct { model Model field Field - events map[EventType]bool fieldValues map[Field][]FieldValue macros map[MacroID]*MacroEvaluator regexpCache StateRegexpCache + registers []Register } // UpdateFields updates the fields used in the rule @@ -60,7 +60,6 @@ func NewState(model Model, field Field, macros map[MacroID]*MacroEvaluator) *Sta field: field, macros: macros, model: model, - events: make(map[EventType]bool), fieldValues:
diff --git a/pkg/security/secl/compiler/generators/accessors/accessors.go b/pkg/security/secl/compiler/generators/accessors/accessors.go
index fa03839b3bba2..f1cc6fe601060 100644
--- a/pkg/security/secl/compiler/generators/accessors/accessors.go
+++ b/pkg/security/secl/compiler/generators/accessors/accessors.go
@@ -248,6 +248,23 @@ func handleNonEmbedded(module *common.Module, field seclField, prefixedFieldName
 	}
 }
 
+func addLengthOpField(module *common.Module, alias string, field *common.StructField) *common.StructField {
+	lengthField := *field
+	lengthField.IsLength = true
+	lengthField.Name += ".length"
+	lengthField.OrigType = "int"
+	lengthField.BasicType = "int"
+	lengthField.ReturnType = "int"
+	lengthField.Struct = "string"
+	lengthField.AliasPrefix = alias
+	lengthField.Alias = alias + ".length"
+	lengthField.CommentText = doc.SECLDocForLength
+
+	module.Fields[lengthField.Alias] = &lengthField
+
+	return &lengthField
+}
+
 // handleIterator adds iterator to list of exposed SECL iterators of the module
 func handleIterator(module *common.Module, field seclField, fieldType, iterator, aliasPrefix, prefixedFieldName, event string, restrictedTo []string, fieldCommentText, opOverrides string, isPointer, isArray bool) *common.StructField {
 	alias := field.name
@@ -272,6 +289,10 @@ func handleIterator(module *common.Module, field seclField, fieldType, iterator,
 		RestrictedTo: restrictedTo,
 	}
 
+	lengthField := addLengthOpField(module, alias, module.Iterators[alias])
+	lengthField.Iterator = module.Iterators[alias]
+	lengthField.IsIterator = true
+
 	return module.Iterators[alias]
 }
 
@@ -311,22 +332,10 @@ func handleFieldWithHandler(module *common.Module, field seclField, aliasPrefix,
 		Ref:          field.ref,
 		RestrictedTo: restrictedTo,
 	}
-
 	module.Fields[alias] = newStructField
 
 	if field.lengthField {
-		var lengthField = *module.Fields[alias]
-		lengthField.IsLength = true
-		lengthField.Name += ".length"
-		lengthField.OrigType = "int"
-		lengthField.BasicType = "int"
-		lengthField.ReturnType = "int"
-		lengthField.Struct = "string"
-		lengthField.AliasPrefix = alias
-		lengthField.Alias = alias + ".length"
-		lengthField.CommentText = doc.SECLDocForLength
-
-		module.Fields[lengthField.Alias] = &lengthField
+		addLengthOpField(module, alias, module.Fields[alias])
 	}
 
 	if _, ok := module.EventTypes[event]; !ok {
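The generator change is mostly a refactor: the ad-hoc `.length` field synthesis that previously lived inline in `handleFieldWithHandler` moves into `addLengthOpField`, so `handleIterator` can reuse it to register a `.length` field for every iterator. The helper leans on Go's struct value copy; a toy version of the pattern, with a minimal stand-in for `common.StructField`:

```go
package main

import "fmt"

// StructField is a minimal stand-in for common.StructField, enough to
// show the copy-then-mutate pattern addLengthOpField relies on: the
// synthetic ".length" field is a value copy of its base field with the
// name, alias, and type rewritten, leaving the base field untouched.
type StructField struct {
	Name, Alias, ReturnType string
	IsLength                bool
}

func addLengthOpField(fields map[string]*StructField, alias string, field *StructField) *StructField {
	lengthField := *field // value copy, not a shared pointer
	lengthField.IsLength = true
	lengthField.Name += ".length"
	lengthField.ReturnType = "int"
	lengthField.Alias = alias + ".length"
	fields[lengthField.Alias] = &lengthField
	return &lengthField
}

func main() {
	fields := map[string]*StructField{}
	base := &StructField{Name: "ProcessContext.Ancestor", Alias: "process.ancestors", ReturnType: "ProcessCacheEntry"}
	lf := addLengthOpField(fields, base.Alias, base)
	fmt.Println(lf.Alias, lf.ReturnType, base.ReturnType) // process.ancestors.length int ProcessCacheEntry
}
```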
diff --git a/pkg/security/secl/compiler/generators/accessors/accessors.tmpl b/pkg/security/secl/compiler/generators/accessors/accessors.tmpl
index dc1d890541ab3..c9c256021d3b5 100644
--- a/pkg/security/secl/compiler/generators/accessors/accessors.tmpl
+++ b/pkg/security/secl/compiler/generators/accessors/accessors.tmpl
@@ -65,7 +65,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			{{- if $Field.OpOverrides}}
 			OpOverrides: {{$Field.OpOverrides}},
 			{{- end}}
-			{{- if $Field.Iterator}}
+			{{- if and $Field.Iterator (not $Field.IsIterator) }}
 				EvalFnc: func(ctx *eval.Context) []{{$Field.ReturnType}} {
 					{{if $Field.Handler}}
 					ev := ctx.Event.(*Event)
@@ -81,6 +81,56 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 
 					iterator := &{{$Field.Iterator.ReturnType}}{}
 
+					if regID != "" {
+						value := iterator.At(ctx, regID, ctx.Registers[regID])
+						if value == nil {
+							return results
+						}
+
+						{{if $Field.Iterator.IsOrigTypePtr}}
+						element := value
+						{{else}}
+						element := *value
+						{{end}}
+
+						{{range $Check := $Checks}}
+							{{if $Field.Iterator.Name | HasPrefix $Check}}
+								{{$SubName := $Field.Iterator.Name | TrimPrefix $Check}}
+								{{$Check = $SubName | printf "element%s"}}
+								if !{{$Check}}() {
+									return append(results, {{$Field.GetDefaultScalarReturnValue}})
+								}
+							{{end}}
+						{{end}}
+
+						{{$SubName := $Field.Iterator.Name | TrimPrefix $Field.Name}}
+
+						{{$Return := $SubName | printf "element%s"}}
+						{{if $Field.Handler }}
+							{{$SubName = $Field.Iterator.Name | TrimPrefix $Field.Prefix}}
+							{{$Handler := $Field.Iterator.Name | TrimPrefix $Field.Handler}}
+							{{$Return = print "ev.FieldHandlers." $Handler "(ev, &element" $SubName ")"}}
+						{{end}}
+
+						{{if eq $Field.ReturnType "int"}}
+							{{if $Field.IsLength}}
+							result := len({{".length" | TrimSuffix $Return}})
+							{{else}}
+							result := int({{$Return}})
+							{{end}}
+						{{else}}
+							result := {{$Return}}
+						{{end}}
+
+						{{if not $Field.GetArrayPrefix}}
+						results = append(results, result)
+						{{else}}
+						results = append(results, result...)
+						{{end}}
+
+						return results
+					}
+
 					value := iterator.Front(ctx)
 					for value != nil {
 						{{if $Field.Iterator.IsOrigTypePtr}}
@@ -112,9 +162,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 					{{if eq $Field.ReturnType "int"}}
 						{{if $Field.IsLength}}
-						result := len({{".length" | TrimSuffix $Return}})
+							result := len({{".length" | TrimSuffix $Return}})
 						{{else}}
-						result := int({{$Return}})
+							result := int({{$Return}})
 						{{end}}
 					{{else}}
 						result := {{$Return}}
@@ -136,7 +186,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 		{{- else}}
 			{{- $ReturnType := $Field.ReturnType}}
 			EvalFnc: func(ctx *eval.Context) {{$Field.GetArrayPrefix}}{{$ReturnType}} {
-				ev := ctx.Event.(*Event)
+				{{- if not (and $Field.IsLength $Field.IsIterator)}}
+				ev := ctx.Event.(*Event)
+				{{end}}
 
 				{{$Return := $Field.Name | printf "ev.%s"}}
@@ -175,7 +227,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 					return int({{$Return}})
 				{{- else}}
 					{{- if $Field.IsLength }}
-					{{$Return = ".length" | TrimSuffix $Return | printf "len(%s)"}}
+						{{- if $Field.IsIterator}}
+							iterator := &{{$Field.Iterator.ReturnType}}{}
+							{{$Return = "iterator.Len(ctx)"}}
+						{{else}}
+							{{$Return = ".length" | TrimSuffix $Return | printf "len(%s)"}}
+						{{end}}
 					{{end}}
 					return {{$Return}}
 				{{end -}}
@@ -235,7 +292,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) {
 		{{end}}
 
 	case "{{$Name}}":
-		{{if $Field.Iterator}}
+		{{- if and $Field.Iterator (not $Field.IsLength)}}
 			var values []{{$Field.ReturnType}}
 
 			ctx := eval.NewContext(ev)
@@ -260,7 +317,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) {
 				{{end}}
 
 				{{if $Field.IsLength}}
-				{{$Return = ".length" | TrimSuffix $Return}}
+					{{$Return = ".length" | TrimSuffix $Return}}
 				{{end}}
 
 				{{if and (eq $Field.ReturnType "int") (ne $Field.OrigType "int")}}
@@ -295,7 +352,13 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) {
 		{{end}}
 
 		{{if $Field.IsLength}}
-			{{$Return = ".length" | TrimSuffix $Field.Name | printf "len(ev.%s)"}}
+			{{- if $Field.IsIterator}}
+				ctx := eval.NewContext(ev)
+				iterator := &{{$Field.Iterator.ReturnType}}{}
+				{{$Return = "iterator.Len(ctx)"}}
+			{{else}}
+				{{$Return = ".length" | TrimSuffix $Return | printf "len(%s)"}}
+			{{end}}
 		{{end}}
 		{{if $Field.Handler}}
 			{{$Ptr := "&"}}
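The template's new register branch assumes that every generated iterator exposes `At(ctx, regID, pos)` and `Len(ctx)` alongside the existing `Front`/`Next` scan API, returning `nil` when the position is out of range. A slice-backed sketch of that assumed contract (the real generated iterators, e.g. `ProcessAncestorsIterator`, walk event state instead):

```go
package main

import "fmt"

type RegisterID = string

type Context struct {
	Registers map[RegisterID]int
}

// sliceIterator sketches the iterator contract the template relies on:
// Front/Next for full scans, plus At for direct register addressing and
// Len for the synthetic ".length" evaluator.
type sliceIterator struct {
	items []string
	pos   int
}

func (it *sliceIterator) Front(_ *Context) *string {
	it.pos = 0
	if len(it.items) == 0 {
		return nil
	}
	return &it.items[0]
}

func (it *sliceIterator) Next() *string {
	it.pos++
	if it.pos >= len(it.items) {
		return nil
	}
	return &it.items[it.pos]
}

// At returns the element at pos, or nil when out of range, matching the
// nil check emitted by the template's register branch.
func (it *sliceIterator) At(_ *Context, _ RegisterID, pos int) *string {
	if pos < 0 || pos >= len(it.items) {
		return nil
	}
	return &it.items[pos]
}

func (it *sliceIterator) Len(_ *Context) int { return len(it.items) }

func main() {
	it := &sliceIterator{items: []string{"a", "b", "c"}}
	ctx := &Context{Registers: map[RegisterID]int{"A": 2}}
	fmt.Println(*it.At(ctx, "A", ctx.Registers["A"]), it.Len(ctx)) // c 3
}
```

Register addressing turns per-element access into a single `At` call, instead of the full `Front`/`Next` scan the non-register path performs on every evaluation.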
diff --git a/pkg/security/secl/compiler/generators/accessors/common/types.go b/pkg/security/secl/compiler/generators/accessors/common/types.go
index 3730f54f4f545..3e9f3ebef05f6 100644
--- a/pkg/security/secl/compiler/generators/accessors/common/types.go
+++ b/pkg/security/secl/compiler/generators/accessors/common/types.go
@@ -71,12 +71,15 @@ type StructField struct {
 	GettersOnly  bool
 	Ref          string
 	RestrictedTo []string
+	IsIterator   bool
 }
 
 // GetEvaluatorType returns the evaluator type name
 func (sf *StructField) GetEvaluatorType() string {
 	var evaluatorType string
-	if sf.ReturnType == "int" {
+	if sf.IsLength && sf.IsIterator {
+		evaluatorType = "eval.IntEvaluator"
+	} else if sf.ReturnType == "int" {
 		evaluatorType = "eval.IntEvaluator"
 		if sf.Iterator != nil || sf.IsArray {
 			evaluatorType = "eval.IntArrayEvaluator"
diff --git a/pkg/security/secl/compiler/generators/accessors/doc/doc.go b/pkg/security/secl/compiler/generators/accessors/doc/doc.go
index f0f77a7e6be7d..69a2068684a54 100644
--- a/pkg/security/secl/compiler/generators/accessors/doc/doc.go
+++ b/pkg/security/secl/compiler/generators/accessors/doc/doc.go
@@ -23,7 +23,8 @@ import (
 const (
 	generateConstantsAnnotationPrefix = "// generate_constants:"
 
-	SECLDocForLength = "SECLDoc[length] Definition:`Length of the corresponding string`" // SECLDocForLength defines SECL doc for length
+	SECLDocForLength = "SECLDoc[length] Definition:`Length of the corresponding element`" // SECLDocForLength defines SECL doc for length
+
 )
 
 type documentation struct {
diff --git a/pkg/security/secl/compiler/generators/accessors/field_accessors.tmpl b/pkg/security/secl/compiler/generators/accessors/field_accessors.tmpl
index 358f7729c5c9d..02eff112541d7 100644
--- a/pkg/security/secl/compiler/generators/accessors/field_accessors.tmpl
+++ b/pkg/security/secl/compiler/generators/accessors/field_accessors.tmpl
@@ -33,7 +33,7 @@ import (
 	{{$accessorReturnType = $Field.ReturnType}}
 {{ end }}
 
-{{ if or ($Field.Iterator) ($Field.IsArray) }}
+{{ if or (and $Field.Iterator (not $Field.IsIterator)) ($Field.IsArray) }}
 	{{$accessorReturnType = $accessorReturnType | printf "[]%s" }}
 {{ end }}
 
@@ -47,7 +47,7 @@ func (ev *Event) Get{{$pascalCaseName}}() {{ $accessorReturnType }} {
 
 	{{$Field | GeneratePrefixNilChecks $.AllFields $accessorReturnType}}
 
-	{{if $Field.Iterator}}
+	{{if and $Field.Iterator (not $Field.IsIterator)}}
 		var values {{ $accessorReturnType }}
 
 		ctx := eval.NewContext(ev)
@@ -109,7 +109,13 @@ func (ev *Event) Get{{$pascalCaseName}}() {{ $accessorReturnType }} {
 	{{end}}
 
 	{{if $Field.IsLength}}
-		{{$Return = ".length" | TrimSuffix $Field.Name | printf "len(ev.%s)"}}
+		{{- if $Field.IsIterator}}
+			ctx := eval.NewContext(ev)
+			iterator := &{{$Field.Iterator.ReturnType}}{}
+			{{$Return = "iterator.Len(ctx)"}}
+		{{else}}
+			{{$Return = ".length" | TrimSuffix $Field.Name | printf "len(ev.%s)"}}
+		{{end}}
 	{{end}}
 
 	{{if $Field.Handler}}
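`GetEvaluatorType` now special-cases iterator-level `.length` fields so they generate a scalar `eval.IntEvaluator`, while ordinary iterator-backed int fields keep producing array evaluators. A condensed sketch of the selection logic (non-int branches elided, stand-in types):

```go
package main

import "fmt"

// Condensed stand-in for common.StructField.GetEvaluatorType showing
// the new branch: an iterator-level ".length" field is scalar, while
// ordinary iterator-backed int fields stay array-valued.
type StructField struct {
	ReturnType  string
	IsLength    bool
	IsIterator  bool
	HasIterator bool // stands in for Iterator != nil
	IsArray     bool
}

func (sf *StructField) GetEvaluatorType() string {
	if sf.IsLength && sf.IsIterator {
		return "eval.IntEvaluator"
	}
	if sf.ReturnType == "int" {
		if sf.HasIterator || sf.IsArray {
			return "eval.IntArrayEvaluator"
		}
		return "eval.IntEvaluator"
	}
	return "eval.StringEvaluator"
}

func main() {
	length := &StructField{ReturnType: "int", IsLength: true, IsIterator: true, HasIterator: true}
	pid := &StructField{ReturnType: "int", HasIterator: true}
	fmt.Println(length.GetEvaluatorType(), pid.GetEvaluatorType()) // eval.IntEvaluator eval.IntArrayEvaluator
}
```

Everything below is the corresponding regeneration of `accessors_unix.go`: each iterator-backed field gains the same `regID` fast path emitted by the template above, and `process.ancestors.length` becomes a queryable scalar field.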
diff --git a/pkg/security/secl/model/accessors_unix.go b/pkg/security/secl/model/accessors_unix.go
index fd8872ed29c9c..1b4a61d764a83 100644
--- a/pkg/security/secl/model/accessors_unix.go
+++ b/pkg/security/secl/model/accessors_unix.go
@@ -4501,6 +4501,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessArgs(ev, &element.ProcessContext.Process)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4522,6 +4532,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessArgsFlags(ev, &element.ProcessContext.Process)
+				results = append(results, result...)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4543,6 +4563,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessArgsOptions(ev, &element.ProcessContext.Process)
+				results = append(results, result...)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4564,6 +4594,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []bool
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &element.ProcessContext.Process)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4585,6 +4625,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessArgv(ev, &element.ProcessContext.Process)
+				results = append(results, result...)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4606,6 +4656,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessArgv0(ev, &element.ProcessContext.Process)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4626,6 +4686,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.Credentials.AUID)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4646,6 +4716,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.Credentials.CapEffective)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4666,6 +4746,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.Credentials.CapPermitted)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4686,6 +4776,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.CGroup.CGroupFile.Inode)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4706,6 +4806,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.CGroup.CGroupFile.MountID)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4727,6 +4837,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveCGroupID(ev, &element.ProcessContext.Process.CGroup)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4748,6 +4868,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveCGroupManager(ev, &element.ProcessContext.Process.CGroup)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4768,6 +4898,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := element.ProcessContext.Process.Comm
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4789,6 +4929,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessContainerID(ev, &element.ProcessContext.Process)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4810,6 +4960,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process))
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4830,6 +4990,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.Credentials.EGID)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4850,6 +5020,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := element.ProcessContext.Process.Credentials.EGroup
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4871,6 +5051,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process)
+				results = append(results, result...)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4892,6 +5082,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process)
+				results = append(results, result...)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4913,6 +5113,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []bool
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &element.ProcessContext.Process)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4933,6 +5143,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.Credentials.EUID)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4953,6 +5173,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := element.ProcessContext.Process.Credentials.EUser
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4973,6 +5203,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, 0)
+				}
+				result := int(element.ProcessContext.Process.FileEvent.FileFields.CTime)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -4999,6 +5242,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5024,6 +5280,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, 0)
+				}
+				result := int(element.ProcessContext.Process.FileEvent.FileFields.GID)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5050,6 +5319,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5076,6 +5358,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.FileEvent)
+				results = append(results, result...)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5102,6 +5397,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []bool
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, false)
+				}
+				result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5127,6 +5435,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, 0)
+				}
+				result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5152,6 +5473,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, 0)
+				}
+				result := int(element.ProcessContext.Process.FileEvent.FileFields.Mode)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5177,6 +5511,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, 0)
+				}
+				result := int(element.ProcessContext.Process.FileEvent.FileFields.MTime)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5202,6 +5549,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, 0)
+				}
+				result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5229,6 +5589,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5256,6 +5629,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent))
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5277,6 +5660,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5303,6 +5699,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5329,6 +5738,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5356,6 +5778,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5383,6 +5818,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent))
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5404,6 +5849,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, 0)
+				}
+				result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields))
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5429,6 +5887,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, 0)
+				}
+				result := int(element.ProcessContext.Process.FileEvent.FileFields.UID)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5455,6 +5926,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.IsNotKworker() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5480,6 +5964,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.Credentials.FSGID)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5500,6 +5994,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := element.ProcessContext.Process.Credentials.FSGroup
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5520,6 +6024,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.Credentials.FSUID)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5540,6 +6054,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := element.ProcessContext.Process.Credentials.FSUser
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5560,6 +6084,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.Credentials.GID)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5580,6 +6114,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := element.ProcessContext.Process.Credentials.Group
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5600,6 +6144,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, 0)
+				}
+				result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5626,6 +6183,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5651,6 +6221,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, 0)
+				}
+				result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5677,6 +6260,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5703,6 +6299,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
+				results = append(results, result...)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5729,6 +6338,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []bool
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, false)
+				}
+				result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5754,6 +6376,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, 0)
+				}
+				result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5779,6 +6414,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, 0)
+				}
+				result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5804,6 +6452,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, 0)
+				}
+				result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5829,6 +6490,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, 0)
+				}
+				result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5856,6 +6530,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5883,6 +6570,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent))
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5904,6 +6601,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5930,6 +6640,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5956,6 +6679,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -5983,6 +6719,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -6010,6 +6759,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent))
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -6031,6 +6790,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, 0)
+				}
+				result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields))
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -6056,6 +6828,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, 0)
+				}
+				result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -6082,6 +6867,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				if !element.ProcessContext.Process.HasInterpreter() {
+					return append(results, "")
+				}
+				result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -6107,6 +6905,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []bool
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := element.ProcessContext.Process.PIDContext.IsKworker
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -6127,6 +6935,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []bool
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := element.ProcessContext.Process.IsThread
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -6139,6 +6957,15 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			},
 			Field:  field,
 			Weight: eval.IteratorWeight,
 		}, nil
+	case "process.ancestors.length":
+		return &eval.IntEvaluator{
+			EvalFnc: func(ctx *eval.Context) int {
+				iterator := &ProcessAncestorsIterator{}
+				return iterator.Len(ctx)
+			},
+			Field:  field,
+			Weight: eval.IteratorWeight,
+		}, nil
 	case "process.ancestors.pid":
 		return &eval.IntArrayEvaluator{
 			EvalFnc: func(ctx *eval.Context) []int {
@@ -6147,6 +6974,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.PIDContext.Pid)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -6167,6 +7004,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.PPid)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -6187,6 +7034,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.PIDContext.Tid)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -6207,6 +7064,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := element.ProcessContext.Process.TTYName
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -6227,6 +7094,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.Credentials.UID)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -6247,6 +7124,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := element.ProcessContext.Process.Credentials.User
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -6268,6 +7155,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveK8SGroups(ev, &element.ProcessContext.Process.UserSession)
+				results = append(results, result...)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -6289,6 +7186,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveK8SUID(ev, &element.ProcessContext.Process.UserSession)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -6310,6 +7217,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveK8SUsername(ev, &element.ProcessContext.Process.UserSession)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -8249,6 +9166,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessArgs(ev, &element.ProcessContext.Process)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -8270,6 +9197,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessArgsFlags(ev, &element.ProcessContext.Process)
+				results = append(results, result...)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -8291,6 +9228,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessArgsOptions(ev, &element.ProcessContext.Process)
+				results = append(results, result...)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -8312,6 +9259,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []bool
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &element.ProcessContext.Process)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -8333,6 +9290,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessArgv(ev, &element.ProcessContext.Process)
+				results = append(results, result...)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -8354,6 +9321,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := ev.FieldHandlers.ResolveProcessArgv0(ev, &element.ProcessContext.Process)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -8374,6 +9351,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.Credentials.AUID)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -8394,6 +9381,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.Credentials.CapEffective)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -8414,6 +9411,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.Credentials.CapPermitted)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
+ } value := iterator.Front(ctx) for value != nil { element := value @@ -8434,6 +9441,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.CGroup.CGroupFile.Inode) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8454,6 +9471,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.CGroup.CGroupFile.MountID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8475,6 +9502,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveCGroupID(ev, &element.ProcessContext.Process.CGroup) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8496,6 +9533,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveCGroupManager(ev, &element.ProcessContext.Process.CGroup) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8516,6 +9563,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.Comm + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8537,6 +9594,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessContainerID(ev, &element.ProcessContext.Process) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8558,6 +9625,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) + results = append(results, result) + return results + } value := 
iterator.Front(ctx) for value != nil { element := value @@ -8578,6 +9655,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.Credentials.EGID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8598,6 +9685,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.Credentials.EGroup + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8619,6 +9716,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) + results = append(results, result...) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8640,6 +9747,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) + results = append(results, result...) 
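In the hunks that follow, file-backed fields wrap the same fast path in a guard: `IsNotKworker()` for `FileEvent` fields and, further down, `HasInterpreter()` for `LinuxBinprm` fields, appending the type's zero value (`0`, `""`, `false`) when the field does not apply. A hedged sketch of that guarded variant; the `Process` type here is a simplified stand-in:

```go
package main

import "fmt"

// Process is a stand-in; the real guard is the model's IsNotKworker().
type Process struct {
	IsKworker bool
	FilePath  string
}

func (p *Process) IsNotKworker() bool { return !p.IsKworker }

// filePathAt mirrors one guarded fast-path block: nil lookup yields an
// empty set, a kworker yields the zero value, otherwise the resolved field.
func filePathAt(p *Process) []string {
	var results []string
	if p == nil {
		return results
	}
	if !p.IsNotKworker() {
		return append(results, "") // zero value, as in the generated code
	}
	return append(results, p.FilePath)
}

func main() {
	fmt.Println(filePathAt(&Process{FilePath: "/usr/bin/bash"})) // [/usr/bin/bash]
	fmt.Println(filePathAt(&Process{IsKworker: true}))           // one empty entry
	fmt.Println(filePathAt(nil))                                 // empty set
}
```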
+ return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8661,6 +9778,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []bool iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &element.ProcessContext.Process) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8681,6 +9808,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.Credentials.EUID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8701,6 +9838,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.Credentials.EUser + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8721,6 +9868,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.FileEvent.FileFields.CTime) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8747,6 +9907,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8772,6 +9945,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.FileEvent.FileFields.GID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8798,6 +9984,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := 
iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8824,6 +10023,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result...) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8850,6 +10062,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []bool iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, false) + } + result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8875,6 +10100,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8900,6 +10138,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.FileEvent.FileFields.Mode) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8925,6 +10176,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.FileEvent.FileFields.MTime) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8950,6 +10214,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int 
iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -8977,6 +10254,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9004,6 +10294,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9025,6 +10325,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9051,6 +10364,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9077,6 +10403,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9104,6 +10443,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var 
results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9131,6 +10483,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9152,6 +10514,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9177,6 +10552,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.FileEvent.FileFields.UID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9203,6 +10591,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9228,6 +10629,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.Credentials.FSGID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9248,6 +10659,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, 
ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.Credentials.FSGroup + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9268,6 +10689,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.Credentials.FSUID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9288,6 +10719,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.Credentials.FSUser + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9308,6 +10749,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.Credentials.GID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9328,6 +10779,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.Credentials.Group + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9348,6 +10809,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9374,6 +10848,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9399,6 +10886,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var 
results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9425,6 +10925,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9451,6 +10964,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + results = append(results, result...) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9477,6 +11003,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []bool iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, false) + } + result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9502,6 +11041,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9527,6 +11079,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) + results = append(results, result) + return results + } value := 
iterator.Front(ctx) for value != nil { element := value @@ -9552,6 +11117,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9577,6 +11155,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9604,6 +11195,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9631,6 +11235,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9652,6 +11266,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9678,6 +11305,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + 
results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9704,6 +11344,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9731,6 +11384,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9758,6 +11424,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9779,6 +11455,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9804,6 +11493,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9830,6 +11532,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := 
ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -9855,6 +11570,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []bool
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := element.ProcessContext.Process.PIDContext.IsKworker
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -9875,6 +11600,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []bool
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := element.ProcessContext.Process.IsThread
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -9887,6 +11622,15 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			},
 			Field: field,
 			Weight: eval.IteratorWeight,
 		}, nil
+	case "ptrace.tracee.ancestors.length":
+		return &eval.IntEvaluator{
+			EvalFnc: func(ctx *eval.Context) int {
+				iterator := &ProcessAncestorsIterator{}
+				return iterator.Len(ctx)
+			},
+			Field: field,
+			Weight: eval.IteratorWeight,
+		}, nil
 	case "ptrace.tracee.ancestors.pid":
 		return &eval.IntArrayEvaluator{
 			EvalFnc: func(ctx *eval.Context) []int {
@@ -9895,6 +11639,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.PIDContext.Pid)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -9915,6 +11669,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.PPid)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -9935,6 +11699,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []int
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := int(element.ProcessContext.Process.PIDContext.Tid)
+				results = append(results, result)
+				return results
+			}
 			value := iterator.Front(ctx)
 			for value != nil {
 				element := value
@@ -9955,6 +11729,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
 			}
 			var results []string
 			iterator := &ProcessAncestorsIterator{}
+			if regID != "" {
+				value := iterator.At(ctx, regID, ctx.Registers[regID])
+				if value == nil {
+					return results
+				}
+				element := value
+				result := element.ProcessContext.Process.TTYName
+				results = append(results, result)
+				return
results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9975,6 +11759,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.Credentials.UID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -9995,6 +11789,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.Credentials.User + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -10016,6 +11820,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveK8SGroups(ev, &element.ProcessContext.Process.UserSession) + results = append(results, result...) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -10037,6 +11851,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveK8SUID(ev, &element.ProcessContext.Process.UserSession) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -10058,6 +11882,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveK8SUsername(ev, &element.ProcessContext.Process.UserSession) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13151,6 +14985,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessArgs(ev, &element.ProcessContext.Process) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13172,6 +15016,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessArgsFlags(ev, &element.ProcessContext.Process) + results = append(results, 
result...) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13193,6 +15047,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessArgsOptions(ev, &element.ProcessContext.Process) + results = append(results, result...) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13214,6 +15078,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []bool iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &element.ProcessContext.Process) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13235,6 +15109,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessArgv(ev, &element.ProcessContext.Process) + results = append(results, result...) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13256,6 +15140,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessArgv0(ev, &element.ProcessContext.Process) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13276,6 +15170,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.Credentials.AUID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13296,6 +15200,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.Credentials.CapEffective) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13316,6 +15230,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.Credentials.CapPermitted) + results = append(results, 
result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13336,6 +15260,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.CGroup.CGroupFile.Inode) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13356,6 +15290,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.CGroup.CGroupFile.MountID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13377,6 +15321,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveCGroupID(ev, &element.ProcessContext.Process.CGroup) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13398,6 +15352,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveCGroupManager(ev, &element.ProcessContext.Process.CGroup) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13418,6 +15382,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.Comm + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13439,6 +15413,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessContainerID(ev, &element.ProcessContext.Process) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13460,6 +15444,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) + results = append(results, result) + 
return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13480,6 +15474,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.Credentials.EGID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13500,6 +15504,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.Credentials.EGroup + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13521,6 +15535,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) + results = append(results, result...) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13542,6 +15566,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) + results = append(results, result...) 
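Besides the fast paths, this series also introduces a new `ptrace.tracee.ancestors.length` field (visible in the `@@ -9887,6 +11622,15 @@` hunk above) whose evaluator simply returns `iterator.Len(ctx)` instead of materialising the ancestor list. A stand-alone illustration of that shape; `Context`, `Iterator`, and `IntEvaluator` below are simplified stand-ins for the eval package types:

```go
package main

import "fmt"

// Context and Iterator are stand-ins for eval.Context and
// ProcessAncestorsIterator.
type Context struct{ ancestors []string }

type Iterator struct{}

func (Iterator) Len(ctx *Context) int { return len(ctx.ancestors) }

// IntEvaluator mirrors the shape of eval.IntEvaluator: a closure plus metadata.
type IntEvaluator struct {
	EvalFnc func(ctx *Context) int
	Field   string
}

// lengthEvaluator mirrors the new "ptrace.tracee.ancestors.length" case:
// the ancestor count comes straight from the iterator.
func lengthEvaluator() *IntEvaluator {
	return &IntEvaluator{
		EvalFnc: func(ctx *Context) int {
			iterator := &Iterator{}
			return iterator.Len(ctx)
		},
		Field: "ptrace.tracee.ancestors.length",
	}
}

func main() {
	ctx := &Context{ancestors: []string{"bash", "sshd", "systemd"}}
	fmt.Println(lengthEvaluator().EvalFnc(ctx)) // 3
}
```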
+ return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13563,6 +15597,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []bool iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &element.ProcessContext.Process) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13583,6 +15627,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.Credentials.EUID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13603,6 +15657,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.Credentials.EUser + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13623,6 +15687,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.FileEvent.FileFields.CTime) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13649,6 +15726,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13674,6 +15764,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.FileEvent.FileFields.GID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13700,6 +15803,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + 
value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13726,6 +15842,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result...) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13752,6 +15881,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []bool iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, false) + } + result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13777,6 +15919,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13802,6 +15957,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.FileEvent.FileFields.Mode) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13827,6 +15995,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.FileEvent.FileFields.MTime) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13852,6 +16033,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } 
var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13879,6 +16073,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13906,6 +16113,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13927,6 +16144,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13953,6 +16183,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -13979,6 +16222,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14006,6 +16262,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID 
eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14033,6 +16302,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14054,6 +16333,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14079,6 +16371,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.FileEvent.FileFields.UID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14105,6 +16410,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.IsNotKworker() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14130,6 +16448,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.Credentials.FSGID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14150,6 +16478,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + 
value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.Credentials.FSGroup + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14170,6 +16508,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.Credentials.FSUID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14190,6 +16538,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.Credentials.FSUser + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14210,6 +16568,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.Credentials.GID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14230,6 +16598,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.Credentials.Group + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14250,6 +16628,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14276,6 +16667,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14301,6 +16705,19 @@ func (m *Model) GetEvaluator(field eval.Field, 
regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14327,6 +16744,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14353,6 +16783,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + results = append(results, result...) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14379,6 +16822,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []bool iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, false) + } + result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14404,6 +16860,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14429,6 +16898,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) + results = append(results, 
result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14454,6 +16936,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14479,6 +16974,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14506,6 +17014,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14533,6 +17054,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14554,6 +17085,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14580,6 +17124,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, 
&element.ProcessContext.Process.LinuxBinprm.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14606,6 +17163,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14633,6 +17203,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, "") + } + result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14660,6 +17243,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14681,6 +17274,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14706,6 +17312,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return append(results, 0) + } + result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14732,6 +17351,19 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + if !element.ProcessContext.Process.HasInterpreter() { + return 
append(results, "") + } + result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14757,6 +17389,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []bool iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.PIDContext.IsKworker + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14777,6 +17419,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []bool iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.IsThread + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14789,6 +17441,15 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval }, Field: field, Weight: eval.IteratorWeight, }, nil + case "signal.target.ancestors.length": + return &eval.IntEvaluator{ + EvalFnc: func(ctx *eval.Context) int { + iterator := &ProcessAncestorsIterator{} + return iterator.Len(ctx) + }, + Field: field, + Weight: eval.IteratorWeight, + }, nil case "signal.target.ancestors.pid": return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { @@ -14797,6 +17458,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.PIDContext.Pid) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14817,6 +17488,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.PPid) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14837,6 +17518,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.PIDContext.Tid) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14857,6 +17548,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.TTYName + 
results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14877,6 +17578,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.Credentials.UID) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14897,6 +17608,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.Credentials.User + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14918,6 +17639,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveK8SGroups(ev, &element.ProcessContext.Process.UserSession) + results = append(results, result...) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14939,6 +17670,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveK8SUID(ev, &element.ProcessContext.Process.UserSession) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -14960,6 +17701,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveK8SUsername(ev, &element.ProcessContext.Process.UserSession) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -18077,6 +20828,7 @@ func (ev *Event) GetFields() []eval.Field { "process.ancestors.interpreter.file.user", "process.ancestors.is_kworker", "process.ancestors.is_thread", + "process.ancestors.length", "process.ancestors.pid", "process.ancestors.ppid", "process.ancestors.tid", @@ -18319,6 +21071,7 @@ func (ev *Event) GetFields() []eval.Field { "ptrace.tracee.ancestors.interpreter.file.user", "ptrace.tracee.ancestors.is_kworker", "ptrace.tracee.ancestors.is_thread", + "ptrace.tracee.ancestors.length", "ptrace.tracee.ancestors.pid", "ptrace.tracee.ancestors.ppid", "ptrace.tracee.ancestors.tid", @@ -18687,6 +21440,7 @@ func (ev *Event) GetFields() []eval.Field { "signal.target.ancestors.interpreter.file.user", "signal.target.ancestors.is_kworker", "signal.target.ancestors.is_thread", + "signal.target.ancestors.length", "signal.target.ancestors.pid", 
"signal.target.ancestors.ppid", "signal.target.ancestors.tid", @@ -20482,17 +23236,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "process.ancestors.file.name.length": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil + return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent), nil case "process.ancestors.file.package.name": var values []string ctx := eval.NewContext(ev) @@ -20542,17 +23286,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "process.ancestors.file.path.length": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil + return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent), nil case "process.ancestors.file.rights": var values []int ctx := eval.NewContext(ev) @@ -20794,17 +23528,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "process.ancestors.interpreter.file.name.length": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil + return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent), nil case "process.ancestors.interpreter.file.package.name": var values []string ctx := eval.NewContext(ev) @@ -20854,17 +23578,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "process.ancestors.interpreter.file.path.length": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil + return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent), nil case "process.ancestors.interpreter.file.rights": var values []int ctx := eval.NewContext(ev) @@ -20925,6 +23639,10 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { ptr = iterator.Next() } return values, nil + case "process.ancestors.length": + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + return iterator.Len(ctx), nil case "process.ancestors.pid": var values []int ctx := eval.NewContext(ev) @@ -22210,17 +24928,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "ptrace.tracee.ancestors.file.name.length": - var values []int - ctx := 
eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil + return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent), nil case "ptrace.tracee.ancestors.file.package.name": var values []string ctx := eval.NewContext(ev) @@ -22270,17 +24978,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "ptrace.tracee.ancestors.file.path.length": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil + return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent), nil case "ptrace.tracee.ancestors.file.rights": var values []int ctx := eval.NewContext(ev) @@ -22522,17 +25220,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "ptrace.tracee.ancestors.interpreter.file.name.length": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil + return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent), nil case "ptrace.tracee.ancestors.interpreter.file.package.name": var values []string ctx := eval.NewContext(ev) @@ -22582,17 +25270,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "ptrace.tracee.ancestors.interpreter.file.path.length": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil + return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent), nil case "ptrace.tracee.ancestors.interpreter.file.rights": var values []int ctx := eval.NewContext(ev) @@ -22653,6 +25331,10 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { ptr = iterator.Next() } return values, nil + case "ptrace.tracee.ancestors.length": + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + return iterator.Len(ctx), nil case "ptrace.tracee.ancestors.pid": var values []int ctx := eval.NewContext(ev) @@ -24190,17 +26872,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "signal.target.ancestors.file.name.length": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) - values = 
append(values, result) - ptr = iterator.Next() - } - return values, nil + return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent), nil case "signal.target.ancestors.file.package.name": var values []string ctx := eval.NewContext(ev) @@ -24250,17 +26922,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "signal.target.ancestors.file.path.length": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent), nil case "signal.target.ancestors.file.rights": var values []int ctx := eval.NewContext(ev) @@ -24502,17 +27164,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "signal.target.ancestors.interpreter.file.name.length": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil + return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent), nil case "signal.target.ancestors.interpreter.file.package.name": var values []string ctx := eval.NewContext(ev) @@ -24562,17 +27214,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "signal.target.ancestors.interpreter.file.path.length": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent), nil case "signal.target.ancestors.interpreter.file.rights": var values []int ctx := eval.NewContext(ev) @@ -24633,6 +27275,10 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { ptr = iterator.Next() } return values, nil + case "signal.target.ancestors.length": + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + return iterator.Len(ctx), nil case "signal.target.ancestors.pid": var values []int ctx := eval.NewContext(ev) @@ -26718,6 +29364,8 @@ func (ev *Event) GetFieldEventType(field eval.Field) (eval.EventType, error) { return "", nil case "process.ancestors.is_thread": return "", nil + case "process.ancestors.length": + return "", nil case "process.ancestors.pid": return "", nil case "process.ancestors.ppid": @@ -27202,6 +29850,8 @@ func (ev *Event) GetFieldEventType(field eval.Field) (eval.EventType, error) { return "ptrace", nil case "ptrace.tracee.ancestors.is_thread": return "ptrace", nil + case "ptrace.tracee.ancestors.length": + return "ptrace", nil case "ptrace.tracee.ancestors.pid": return "ptrace", nil case "ptrace.tracee.ancestors.ppid": @@ -27938,6 +30588,8 @@ func (ev *Event) 
GetFieldEventType(field eval.Field) (eval.EventType, error) { return "signal", nil case "signal.target.ancestors.is_thread": return "signal", nil + case "signal.target.ancestors.length": + return "signal", nil case "signal.target.ancestors.pid": return "signal", nil case "signal.target.ancestors.ppid": @@ -29489,6 +32141,8 @@ func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { return reflect.Bool, nil case "process.ancestors.is_thread": return reflect.Bool, nil + case "process.ancestors.length": + return reflect.Int, nil case "process.ancestors.pid": return reflect.Int, nil case "process.ancestors.ppid": @@ -29973,6 +32627,8 @@ func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { return reflect.Bool, nil case "ptrace.tracee.ancestors.is_thread": return reflect.Bool, nil + case "ptrace.tracee.ancestors.length": + return reflect.Int, nil case "ptrace.tracee.ancestors.pid": return reflect.Int, nil case "ptrace.tracee.ancestors.ppid": @@ -30709,6 +33365,8 @@ func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { return reflect.Bool, nil case "signal.target.ancestors.is_thread": return reflect.Bool, nil + case "signal.target.ancestors.length": + return reflect.Int, nil case "signal.target.ancestors.pid": return reflect.Int, nil case "signal.target.ancestors.ppid": @@ -35923,6 +38581,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.IsThread = rv return nil + case "process.ancestors.length": + if ev.BaseEvent.ProcessContext == nil { + ev.BaseEvent.ProcessContext = &ProcessContext{} + } + if ev.BaseEvent.ProcessContext.Ancestor == nil { + ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} + } + return &eval.ErrFieldReadOnly{Field: "process.ancestors.length"} case "process.ancestors.pid": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} @@ -38865,6 +41531,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.IsThread = rv return nil + case "ptrace.tracee.ancestors.length": + if ev.PTrace.Tracee == nil { + ev.PTrace.Tracee = &ProcessContext{} + } + if ev.PTrace.Tracee.Ancestor == nil { + ev.PTrace.Tracee.Ancestor = &ProcessCacheEntry{} + } + return &eval.ErrFieldReadOnly{Field: "ptrace.tracee.ancestors.length"} case "ptrace.tracee.ancestors.pid": if ev.PTrace.Tracee == nil { ev.PTrace.Tracee = &ProcessContext{} @@ -42684,6 +45358,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } ev.Signal.Target.Ancestor.ProcessContext.Process.IsThread = rv return nil + case "signal.target.ancestors.length": + if ev.Signal.Target == nil { + ev.Signal.Target = &ProcessContext{} + } + if ev.Signal.Target.Ancestor == nil { + ev.Signal.Target.Ancestor = &ProcessCacheEntry{} + } + return &eval.ErrFieldReadOnly{Field: "signal.target.ancestors.length"} case "signal.target.ancestors.pid": if ev.Signal.Target == nil { ev.Signal.Target = &ProcessContext{} diff --git a/pkg/security/secl/model/accessors_windows.go b/pkg/security/secl/model/accessors_windows.go index 9d946ba35751c..2cc1f461e6a14 100644 --- a/pkg/security/secl/model/accessors_windows.go +++ b/pkg/security/secl/model/accessors_windows.go @@ -795,6 +795,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, 
ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessCmdLine(ev, &element.ProcessContext.Process) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -815,6 +825,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.ContainerID + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -836,6 +856,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -857,6 +887,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) + results = append(results, result...) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -878,6 +918,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) + results = append(results, result...) 
+ return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -900,6 +950,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -922,6 +982,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -944,6 +1014,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -966,6 +1046,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -978,6 +1068,15 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval }, Field: field, Weight: eval.IteratorWeight, }, nil + case "process.ancestors.length": + return &eval.IntEvaluator{ + EvalFnc: func(ctx *eval.Context) int { + iterator := &ProcessAncestorsIterator{} + return iterator.Len(ctx) + }, + Field: field, + Weight: eval.IteratorWeight, + }, nil case "process.ancestors.pid": return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { @@ -986,6 +1085,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.PIDContext.Pid) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -1006,6 +1115,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.PPid) + results = append(results, result) + return results + } value 
:= iterator.Front(ctx) for value != nil { element := value @@ -1027,6 +1146,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveUser(ev, &element.ProcessContext.Process) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -1047,6 +1176,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.OwnerSidString + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -1739,6 +1878,7 @@ func (ev *Event) GetFields() []eval.Field { "process.ancestors.file.name.length", "process.ancestors.file.path", "process.ancestors.file.path.length", + "process.ancestors.length", "process.ancestors.pid", "process.ancestors.ppid", "process.ancestors.user", @@ -2036,17 +2176,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "process.ancestors.file.name.length": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil + return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent), nil case "process.ancestors.file.path": var values []string ctx := eval.NewContext(ev) @@ -2060,17 +2190,11 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "process.ancestors.file.path.length": - var values []int + return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent), nil + case "process.ancestors.length": ctx := eval.NewContext(ev) iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil + return iterator.Len(ctx), nil case "process.ancestors.pid": var values []int ctx := eval.NewContext(ev) @@ -2449,6 +2573,8 @@ func (ev *Event) GetFieldEventType(field eval.Field) (eval.EventType, error) { return "", nil case "process.ancestors.file.path.length": return "", nil + case "process.ancestors.length": + return "", nil case "process.ancestors.pid": return "", nil case "process.ancestors.ppid": @@ -2754,6 +2880,8 @@ func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { return reflect.String, nil case "process.ancestors.file.path.length": return reflect.Int, nil + case "process.ancestors.length": + return reflect.Int, nil case "process.ancestors.pid": return reflect.Int, nil case "process.ancestors.ppid": @@ -3544,6 +3672,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { 
ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } return &eval.ErrFieldReadOnly{Field: "process.ancestors.file.path.length"} + case "process.ancestors.length": + if ev.BaseEvent.ProcessContext == nil { + ev.BaseEvent.ProcessContext = &ProcessContext{} + } + if ev.BaseEvent.ProcessContext.Ancestor == nil { + ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} + } + return &eval.ErrFieldReadOnly{Field: "process.ancestors.length"} case "process.ancestors.pid": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} diff --git a/pkg/security/secl/model/events.go b/pkg/security/secl/model/events.go index 5804216d60297..8e44b9aa1e97c 100644 --- a/pkg/security/secl/model/events.go +++ b/pkg/security/secl/model/events.go @@ -269,6 +269,8 @@ func (t EventType) String() string { return "change_permission" case LoginUIDWriteEventType: return "login_uid_write" + case CgroupWriteEventType: + return "cgroup_write" default: return "unknown" } diff --git a/pkg/security/secl/model/field_accessors_unix.go b/pkg/security/secl/model/field_accessors_unix.go index 2398cbdbd6b7f..71e1fbaab29bc 100644 --- a/pkg/security/secl/model/field_accessors_unix.go +++ b/pkg/security/secl/model/field_accessors_unix.go @@ -6358,6 +6358,19 @@ func (ev *Event) GetProcessAncestorsIsThread() []bool { return values } +// GetProcessAncestorsLength returns the value of the field, resolving if necessary +func (ev *Event) GetProcessAncestorsLength() int { + if ev.BaseEvent.ProcessContext == nil { + return 0 + } + if ev.BaseEvent.ProcessContext.Ancestor == nil { + return 0 + } + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + return iterator.Len(ctx) +} + // GetProcessAncestorsPid returns the value of the field, resolving if necessary func (ev *Event) GetProcessAncestorsPid() []uint32 { if ev.BaseEvent.ProcessContext == nil { @@ -10393,6 +10406,22 @@ func (ev *Event) GetPtraceTraceeAncestorsIsThread() []bool { return values } +// GetPtraceTraceeAncestorsLength returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeAncestorsLength() int { + if ev.GetEventType().String() != "ptrace" { + return 0 + } + if ev.PTrace.Tracee == nil { + return 0 + } + if ev.PTrace.Tracee.Ancestor == nil { + return 0 + } + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + return iterator.Len(ctx) +} + // GetPtraceTraceeAncestorsPid returns the value of the field, resolving if necessary func (ev *Event) GetPtraceTraceeAncestorsPid() []uint32 { if ev.GetEventType().String() != "ptrace" { @@ -16018,6 +16047,22 @@ func (ev *Event) GetSignalTargetAncestorsIsThread() []bool { return values } +// GetSignalTargetAncestorsLength returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetAncestorsLength() int { + if ev.GetEventType().String() != "signal" { + return 0 + } + if ev.Signal.Target == nil { + return 0 + } + if ev.Signal.Target.Ancestor == nil { + return 0 + } + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + return iterator.Len(ctx) +} + // GetSignalTargetAncestorsPid returns the value of the field, resolving if necessary func (ev *Event) GetSignalTargetAncestorsPid() []uint32 { if ev.GetEventType().String() != "signal" { diff --git a/pkg/security/secl/model/field_accessors_windows.go b/pkg/security/secl/model/field_accessors_windows.go index d1cfc889afacb..1ed3130e6951e 100644 --- a/pkg/security/secl/model/field_accessors_windows.go +++ 
b/pkg/security/secl/model/field_accessors_windows.go @@ -984,6 +984,19 @@ func (ev *Event) GetProcessAncestorsFilePathLength() []int { return values } +// GetProcessAncestorsLength returns the value of the field, resolving if necessary +func (ev *Event) GetProcessAncestorsLength() int { + if ev.BaseEvent.ProcessContext == nil { + return 0 + } + if ev.BaseEvent.ProcessContext.Ancestor == nil { + return 0 + } + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + return iterator.Len(ctx) +} + // GetProcessAncestorsPid returns the value of the field, resolving if necessary func (ev *Event) GetProcessAncestorsPid() []uint32 { if ev.BaseEvent.ProcessContext == nil { diff --git a/pkg/security/secl/model/model.go b/pkg/security/secl/model/model.go index 5593d1502a45b..e438c1b0d8ec7 100644 --- a/pkg/security/secl/model/model.go +++ b/pkg/security/secl/model/model.go @@ -506,6 +506,43 @@ func (it *ProcessAncestorsIterator) Next() *ProcessCacheEntry { return nil } +// At returns the element at the given position +func (it *ProcessAncestorsIterator) At(ctx *eval.Context, regID eval.RegisterID, pos int) *ProcessCacheEntry { + if entry := ctx.RegisterCache[regID]; entry != nil && entry.Pos == pos { + return entry.Value.(*ProcessCacheEntry) + } + + var i int + + ancestor := ctx.Event.(*Event).ProcessContext.Ancestor + for ancestor != nil { + if i == pos { + ctx.RegisterCache[regID] = &eval.RegisterCacheEntry{ + Pos: pos, + Value: ancestor, + } + return ancestor + } + ancestor = ancestor.Ancestor + i++ + } + + return nil +} + +// Len returns the len +func (it *ProcessAncestorsIterator) Len(ctx *eval.Context) int { + var size int + + ancestor := ctx.Event.(*Event).ProcessContext.Ancestor + for ancestor != nil { + size++ + ancestor = ancestor.Ancestor + } + + return size +} + // HasParent returns whether the process has a parent func (p *ProcessContext) HasParent() bool { return p.Parent != nil diff --git a/pkg/security/secl/model/unmarshallers_linux.go b/pkg/security/secl/model/unmarshallers_linux.go index 6b5e95d61006c..ab0955abc6195 100644 --- a/pkg/security/secl/model/unmarshallers_linux.go +++ b/pkg/security/secl/model/unmarshallers_linux.go @@ -1332,13 +1332,13 @@ func (e *RawPacketEvent) UnmarshalBinary(data []byte) (int, error) { if layer := packet.Layer(layers.LayerTypeUDP); layer != nil { if rl, ok := layer.(*layers.UDP); ok { - e.L4Protocol = uint16(layers.LayerTypeUDP) + e.L4Protocol = unix.IPPROTO_UDP e.Source.Port = uint16(rl.SrcPort) e.Destination.Port = uint16(rl.DstPort) } } else if layer := packet.Layer(layers.LayerTypeTCP); layer != nil { if rl, ok := layer.(*layers.TCP); ok { - e.L4Protocol = uint16(layers.IPProtocolTCP) + e.L4Protocol = unix.IPPROTO_TCP e.Source.Port = uint16(rl.SrcPort) e.Destination.Port = uint16(rl.DstPort) } diff --git a/pkg/security/seclwin/model/accessors_win.go b/pkg/security/seclwin/model/accessors_win.go index 0183570889f98..c6e0e7b3bb650 100644 --- a/pkg/security/seclwin/model/accessors_win.go +++ b/pkg/security/seclwin/model/accessors_win.go @@ -793,6 +793,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessCmdLine(ev, &element.ProcessContext.Process) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := 
value @@ -813,6 +823,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.ContainerID + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -834,6 +854,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -855,6 +885,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) + results = append(results, result...) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -876,6 +916,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) + results = append(results, result...) 
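An editorial aside on the pattern repeated in each accessor above: when a rule pins the ancestors iterator to a register (the `process.ancestors[A]` syntax exercised in the tests later in this diff), the evaluator resolves a single element at the register's position via `At` instead of materializing the whole chain, and remembers the last `(position, value)` pair in `ctx.RegisterCache`. A minimal, self-contained sketch of that idea, with simplified names that are not the actual SECL types:

```go
package main

import "fmt"

// node stands in for ProcessCacheEntry: a singly linked ancestor chain.
type node struct {
	name string
	next *node
}

// cacheEntry mirrors the idea of eval.RegisterCacheEntry: remember the last
// (position, element) pair so repeated lookups at the same register position
// don't re-walk the list.
type cacheEntry struct {
	pos   int
	value *node
}

type iterator struct {
	cache map[string]*cacheEntry // keyed by register ID
}

// at returns the element at pos, consulting the per-register cache first.
func (it *iterator) at(head *node, regID string, pos int) *node {
	if e := it.cache[regID]; e != nil && e.pos == pos {
		return e.value
	}
	for i, n := 0, head; n != nil; i, n = i+1, n.next {
		if i == pos {
			it.cache[regID] = &cacheEntry{pos: pos, value: n}
			return n
		}
	}
	return nil
}

// length walks the chain once, like ProcessAncestorsIterator.Len.
func (it *iterator) length(head *node) int {
	size := 0
	for n := head; n != nil; n = n.next {
		size++
	}
	return size
}

func main() {
	chain := &node{"bash", &node{"sshd", &node{"init", nil}}}
	it := &iterator{cache: map[string]*cacheEntry{}}
	fmt.Println(it.at(chain, "A", 1).name) // sshd (walked)
	fmt.Println(it.at(chain, "A", 1).name) // sshd (served from the cache)
	fmt.Println(it.length(chain))          // 3
}
```

The cache matters because a rule typically evaluates several fields of `process.ancestors[A]` at the same register position in one pass; only the first access walks the list.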
+ return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -898,6 +948,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -920,6 +980,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -942,6 +1012,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -964,6 +1044,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -976,6 +1066,15 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval }, Field: field, Weight: eval.IteratorWeight, }, nil + case "process.ancestors.length": + return &eval.IntEvaluator{ + EvalFnc: func(ctx *eval.Context) int { + iterator := &ProcessAncestorsIterator{} + return iterator.Len(ctx) + }, + Field: field, + Weight: eval.IteratorWeight, + }, nil case "process.ancestors.pid": return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { @@ -984,6 +1083,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.PIDContext.Pid) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -1004,6 +1113,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []int iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := int(element.ProcessContext.Process.PPid) + results = append(results, result) + return results + } value 
:= iterator.Front(ctx) for value != nil { element := value @@ -1025,6 +1144,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := ev.FieldHandlers.ResolveUser(ev, &element.ProcessContext.Process) + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -1045,6 +1174,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } var results []string iterator := &ProcessAncestorsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return results + } + element := value + result := element.ProcessContext.Process.OwnerSidString + results = append(results, result) + return results + } value := iterator.Front(ctx) for value != nil { element := value @@ -1737,6 +1876,7 @@ func (ev *Event) GetFields() []eval.Field { "process.ancestors.file.name.length", "process.ancestors.file.path", "process.ancestors.file.path.length", + "process.ancestors.length", "process.ancestors.pid", "process.ancestors.ppid", "process.ancestors.user", @@ -2034,17 +2174,7 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "process.ancestors.file.name.length": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil + return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent), nil case "process.ancestors.file.path": var values []string ctx := eval.NewContext(ev) @@ -2058,17 +2188,11 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { } return values, nil case "process.ancestors.file.path.length": - var values []int + return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent), nil + case "process.ancestors.length": ctx := eval.NewContext(ev) iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil + return iterator.Len(ctx), nil case "process.ancestors.pid": var values []int ctx := eval.NewContext(ev) @@ -2447,6 +2571,8 @@ func (ev *Event) GetFieldEventType(field eval.Field) (eval.EventType, error) { return "", nil case "process.ancestors.file.path.length": return "", nil + case "process.ancestors.length": + return "", nil case "process.ancestors.pid": return "", nil case "process.ancestors.ppid": @@ -2752,6 +2878,8 @@ func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { return reflect.String, nil case "process.ancestors.file.path.length": return reflect.Int, nil + case "process.ancestors.length": + return reflect.Int, nil case "process.ancestors.pid": return reflect.Int, nil case "process.ancestors.ppid": @@ -3542,6 +3670,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { 
ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} } return &eval.ErrFieldReadOnly{Field: "process.ancestors.file.path.length"} + case "process.ancestors.length": + if ev.BaseEvent.ProcessContext == nil { + ev.BaseEvent.ProcessContext = &ProcessContext{} + } + if ev.BaseEvent.ProcessContext.Ancestor == nil { + ev.BaseEvent.ProcessContext.Ancestor = &ProcessCacheEntry{} + } + return &eval.ErrFieldReadOnly{Field: "process.ancestors.length"} case "process.ancestors.pid": if ev.BaseEvent.ProcessContext == nil { ev.BaseEvent.ProcessContext = &ProcessContext{} diff --git a/pkg/security/seclwin/model/events.go b/pkg/security/seclwin/model/events.go index 5804216d60297..8e44b9aa1e97c 100644 --- a/pkg/security/seclwin/model/events.go +++ b/pkg/security/seclwin/model/events.go @@ -269,6 +269,8 @@ func (t EventType) String() string { return "change_permission" case LoginUIDWriteEventType: return "login_uid_write" + case CgroupWriteEventType: + return "cgroup_write" default: return "unknown" } diff --git a/pkg/security/seclwin/model/model.go b/pkg/security/seclwin/model/model.go index 5593d1502a45b..e438c1b0d8ec7 100644 --- a/pkg/security/seclwin/model/model.go +++ b/pkg/security/seclwin/model/model.go @@ -506,6 +506,43 @@ func (it *ProcessAncestorsIterator) Next() *ProcessCacheEntry { return nil } +// At returns the element at the given position +func (it *ProcessAncestorsIterator) At(ctx *eval.Context, regID eval.RegisterID, pos int) *ProcessCacheEntry { + if entry := ctx.RegisterCache[regID]; entry != nil && entry.Pos == pos { + return entry.Value.(*ProcessCacheEntry) + } + + var i int + + ancestor := ctx.Event.(*Event).ProcessContext.Ancestor + for ancestor != nil { + if i == pos { + ctx.RegisterCache[regID] = &eval.RegisterCacheEntry{ + Pos: pos, + Value: ancestor, + } + return ancestor + } + ancestor = ancestor.Ancestor + i++ + } + + return nil +} + +// Len returns the len +func (it *ProcessAncestorsIterator) Len(ctx *eval.Context) int { + var size int + + ancestor := ctx.Event.(*Event).ProcessContext.Ancestor + for ancestor != nil { + size++ + ancestor = ancestor.Ancestor + } + + return size +} + // HasParent returns whether the process has a parent func (p *ProcessContext) HasParent() bool { return p.Parent != nil diff --git a/pkg/security/security_profile/profile/profile.go b/pkg/security/security_profile/profile/profile.go index 83272e911901d..65f62db7c9568 100644 --- a/pkg/security/security_profile/profile/profile.go +++ b/pkg/security/security_profile/profile/profile.go @@ -11,7 +11,6 @@ package profile import ( "errors" "fmt" - "io" "math" "os" "slices" @@ -188,13 +187,7 @@ func (p *SecurityProfile) NewProcessNodeCallback(_ *activity_tree.ProcessNode) { // LoadProtoFromFile loads proto profile from file func LoadProtoFromFile(filepath string) (*proto.SecurityProfile, error) { - f, err := os.Open(filepath) - if err != nil { - return nil, fmt.Errorf("couldn't open profile: %w", err) - } - defer f.Close() - - raw, err := io.ReadAll(f) + raw, err := os.ReadFile(filepath) if err != nil { return nil, fmt.Errorf("couldn't read profile: %w", err) } diff --git a/pkg/security/tests/action_test.go b/pkg/security/tests/action_test.go index 07d959e88bec4..cd8a081530ad9 100644 --- a/pkg/security/tests/action_test.go +++ b/pkg/security/tests/action_test.go @@ -127,6 +127,9 @@ func TestActionKill(t *testing.T) { if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal == 'SIGUSR2')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { t.Errorf("element not found %s => 
%v", string(msg.Data), err) } + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.status == 'performed')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } }) return nil @@ -182,6 +185,9 @@ func TestActionKill(t *testing.T) { if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at =~ /20.*/)]`); err != nil || el == nil || len(el.([]interface{})) == 0 { t.Errorf("element not found %s => %v", string(msg.Data), err) } + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.status == 'performed')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } }) return nil @@ -331,6 +337,9 @@ func TestActionKillRuleSpecific(t *testing.T) { if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at =~ /20.*/)]`); err != nil || el == nil || len(el.([]interface{})) == 0 { t.Errorf("element not found %s => %v", string(msg.Data), err) } + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.status == 'performed')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } }) return nil @@ -399,6 +408,9 @@ func testActionKillDisarm(t *testing.T, test *testModule, sleep, syscallTester s if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at =~ /20.*/)]`); err != nil || el == nil || len(el.([]interface{})) == 0 { t.Errorf("element not found %s => %v", string(msg.Data), err) } + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.status == 'performed')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } }) return nil @@ -406,7 +418,7 @@ func testActionKillDisarm(t *testing.T, test *testModule, sleep, syscallTester s assert.NoError(t, err) } - testKillActionIgnored := func(t *testing.T, ruleID string, cmdFunc func(context.Context)) { + testKillActionDisarmed := func(t *testing.T, ruleID string, cmdFunc func(context.Context)) { test.msgSender.flush() err := test.GetEventSent(t, func() error { cmdFunc(nil) @@ -426,8 +438,11 @@ func testActionKillDisarm(t *testing.T, test *testModule, sleep, syscallTester s validateMessageSchema(t, string(msg.Data)) jsonPathValidation(test, msg.Data, func(_ *testModule, obj interface{}) { - if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions`); err == nil { - t.Errorf("unexpected rule action %s", string(msg.Data)) + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal == 'SIGKILL')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.status == 'rule_disarmed')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) } }) @@ -447,8 +462,8 @@ func testActionKillDisarm(t *testing.T, test *testModule, sleep, syscallTester s }) } - // test that another executable dismars the kill action - testKillActionIgnored(t, "kill_action_disarm_executable", func(_ context.Context) { + // test that another executable disarms the kill action + testKillActionDisarmed(t, "kill_action_disarm_executable", func(_ context.Context) { cmd := exec.Command(sleep, "1") cmd.Env = []string{"TARGETTOKILL=1"} _ = cmd.Run() @@ -486,8 +501,8 
@@ func testActionKillDisarm(t *testing.T, test *testModule, sleep, syscallTester s } defer newDockerInstance.stop() - // test that another container dismars the kill action - testKillActionIgnored(t, "kill_action_disarm_container", func(_ context.Context) { + // test that another container disarms the kill action + testKillActionDisarmed(t, "kill_action_disarm_container", func(_ context.Context) { cmd := newDockerInstance.Command("env", []string{"-i", "-", "TARGETTOKILL=1", "sleep", "1"}, []string{}) _ = cmd.Run() }) diff --git a/pkg/security/tests/event_test.go b/pkg/security/tests/event_test.go index bf567e41d29d2..e1f9280d204da 100644 --- a/pkg/security/tests/event_test.go +++ b/pkg/security/tests/event_test.go @@ -10,6 +10,7 @@ package tests import ( "context" + "fmt" "os" "path" "path/filepath" @@ -24,6 +25,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/metrics" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" + "github.com/DataDog/datadog-agent/pkg/security/utils" ) func TestEventRulesetLoaded(t *testing.T) { @@ -219,6 +221,64 @@ func TestEventRaleLimiters(t *testing.T) { }) } +func TestEventIteratorRegister(t *testing.T) { + SkipIfNotAvailable(t) + + pid1ExePath := utils.ProcExePath(1) + pid1Path, err := os.Readlink(pid1ExePath) + if err != nil { + t.Fatal(err) + } + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_register_1", + Expression: `open.file.path == "{{.Root}}/test-register" && process.ancestors[A].name == "syscall_tester" && process.ancestors[A].argv in ["span-exec"]`, + }, + { + ID: "test_register_2", + Expression: fmt.Sprintf(`open.file.path == "{{.Root}}/test-register" && process.ancestors[A].file.path == "%s" && process.ancestors[A].pid == 1`, pid1Path), + }, + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + testFile, _, err := test.Path("test-register") + if err != nil { + t.Fatal(err) + } + defer os.Remove(testFile) + + syscallTester, err := loadSyscallTester(t, test, "syscall_tester") + if err != nil { + t.Fatal(err) + } + + t.Run("std", func(t *testing.T) { + test.WaitSignal(t, func() error { + return runSyscallTesterFunc(context.Background(), t, syscallTester, "span-exec", "123", "456", "/usr/bin/touch", testFile) + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "test_register_1") + }) + }) + + t.Run("pid1", func(t *testing.T) { + test.WaitSignal(t, func() error { + f, err := os.Create(testFile) + if err != nil { + return err + } + return f.Close() + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "test_register_2") + }) + }) +} + func truncatedParents(t *testing.T, staticOpts testOpts, dynamicOpts dynamicTestOpts) { var truncatedParents string for i := 0; i < model.MaxPathDepth; i++ { diff --git a/pkg/security/tests/process_test.go b/pkg/security/tests/process_test.go index 518a2f929bd6b..405db99859fc0 100644 --- a/pkg/security/tests/process_test.go +++ b/pkg/security/tests/process_test.go @@ -141,7 +141,7 @@ func TestProcessContext(t *testing.T) { }, { ID: "test_rule_ancestors", - Expression: `open.file.path == "{{.Root}}/test-process-ancestors" && process.ancestors[_].file.name in ["sh", "dash", "bash"]`, + Expression: `open.file.path == "{{.Root}}/test-process-ancestors" && process.ancestors[A].file.name in ["sh", "dash", "bash"]`, }, { ID: "test_rule_parent", @@ -149,7 +149,7 @@ func TestProcessContext(t *testing.T) { }, { ID: 
"test_rule_pid1", - Expression: `open.file.path == "{{.Root}}/test-process-pid1" && process.ancestors[_].pid == 1`, + Expression: `open.file.path == "{{.Root}}/test-process-pid1" && process.ancestors[A].pid == 1`, }, { ID: "test_rule_args_envs", diff --git a/pkg/security/tests/schemas/kill.schema.json b/pkg/security/tests/schemas/kill.schema.json index ccf7fcb3ff192..7b91ef1008a18 100644 --- a/pkg/security/tests/schemas/kill.schema.json +++ b/pkg/security/tests/schemas/kill.schema.json @@ -33,30 +33,56 @@ "properties": { "signal": { "const": "SIGKILL" + }, + "status": { + "const": "performed" } }, "required": [ "type", "signal", + "scope", + "status", "created_at", "detected_at", "killed_at", - "exited_at" + "exited_at", + "ttr" ] }, { "properties": { "signal": { "const": "SIGUSR2" + }, + "status": { + "const": "performed" } }, "required": [ "type", "signal", + "scope", + "status", "created_at", "detected_at", "killed_at" ] + }, + { + "properties": { + "status": { + "const": "rule_disarmed" + } + }, + "required": [ + "type", + "signal", + "scope", + "status", + "created_at", + "detected_at" + ] } ] } \ No newline at end of file diff --git a/pkg/trace/api/evp_proxy.go b/pkg/trace/api/evp_proxy.go index 4ab15b70ab36f..452d73017aa8d 100644 --- a/pkg/trace/api/evp_proxy.go +++ b/pkg/trace/api/evp_proxy.go @@ -181,9 +181,9 @@ func (t *evpProxyTransport) RoundTrip(req *http.Request) (rresp *http.Response, timeout := getConfiguredEVPRequestTimeoutDuration(t.conf) req.Header.Set("X-Datadog-Timeout", strconv.Itoa((int(timeout.Seconds())))) deadline := time.Now().Add(timeout) - ctx, ctxCancel := context.WithDeadline(req.Context(), deadline) + //nolint:govet,lostcancel we don't need to manually cancel this context, we can rely on the parent context being cancelled + ctx, _ := context.WithDeadline(req.Context(), deadline) req = req.WithContext(ctx) - defer ctxCancel() // Set target URL and API key header (per domain) req.URL.Scheme = "https" diff --git a/pkg/trace/api/evp_proxy_test.go b/pkg/trace/api/evp_proxy_test.go index 3016b6e5ef53c..8f9afc4cb2570 100644 --- a/pkg/trace/api/evp_proxy_test.go +++ b/pkg/trace/api/evp_proxy_test.go @@ -505,4 +505,30 @@ func TestE2E(t *testing.T) { require.Equal(t, http.StatusBadGateway, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) assert.Equal(t, "http: proxy error: context deadline exceeded\n", logs) }) + + t.Run("chunked-response", func(t *testing.T) { + conf := newTestReceiverConfig() + conf.Site = "us3.datadoghq.com" + conf.Endpoints[0].APIKey = "test_api_key" + conf.EVPProxy.ReceiverTimeout = 1 // in seconds + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Transfer-Encoding", "chunked") + w.Write([]byte(`Hello`)) + w.(http.Flusher).Flush() + time.Sleep(200 * time.Millisecond) + w.Write([]byte(`World`)) // this will be discarded if the context was cancelled + })) + + req := httptest.NewRequest("POST", "/mypath/mysubpath?arg=test", bytes.NewReader(randBodyBuf)) + req.Header.Set("X-Datadog-EVP-Subdomain", "my.subdomain") + resp, logs := sendRequestThroughForwarderAgainstDummyServer(conf, req, stats, strings.TrimPrefix(server.URL, "http://")) + + resp.Body.Close() + require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + assert.Equal(t, "", logs) + body, err := io.ReadAll(resp.Body) + assert.NoError(t, err) + assert.Equal(t, "HelloWorld", string(body)) + }) } diff --git a/pkg/trace/writer/trace_test.go b/pkg/trace/writer/trace_test.go index 
9c3ecd7a4194e..3c6529e1cce11 100644 --- a/pkg/trace/writer/trace_test.go +++ b/pkg/trace/writer/trace_test.go @@ -257,7 +257,10 @@ func TestResetBuffer(t *testing.T) { runtime.ReadMemStats(&m) assert.Greater(t, m.HeapInuse, uint64(50*1e6)) + w.mu.Lock() w.resetBuffer() + w.mu.Unlock() + runtime.GC() runtime.ReadMemStats(&m) assert.Less(t, m.HeapInuse, uint64(50*1e6)) diff --git a/pkg/util/containerd/containerd_util.go b/pkg/util/containerd/containerd_util.go index 9b80304c894b4..88fe4ecd91820 100644 --- a/pkg/util/containerd/containerd_util.go +++ b/pkg/util/containerd/containerd_util.go @@ -13,12 +13,13 @@ import ( "encoding/json" "errors" "fmt" + "os" "strings" "time" + "github.com/hashicorp/go-multierror" "github.com/opencontainers/image-spec/identity" - "github.com/DataDog/datadog-agent/pkg/config/env" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" dderrors "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -453,15 +454,33 @@ func (c *ContainerdUtil) getMounts(ctx context.Context, expiration time.Duration return nil, nil, fmt.Errorf("No snapshots returned for image: %s", imageID) } - // Transforming mounts in case we're running in a container - if env.IsContainerized() { - for i := range mounts { - mounts[i].Source = strings.ReplaceAll(mounts[i].Source, "/var/lib", "/host/var/lib") - for j := range mounts[i].Options { - mounts[i].Options[j] = strings.ReplaceAll(mounts[i].Options[j], "/var/lib", "/host/var/lib") + for i := range mounts { + mounts[i].Source = sanitizePath(mounts[i].Source) + + var errs error + for j, opt := range mounts[i].Options { + for _, prefix := range []string{"upperdir=", "lowerdir=", "workdir="} { + if strings.HasPrefix(opt, prefix) { + trimmedOpt := strings.TrimPrefix(opt, prefix) + dirs := strings.Split(trimmedOpt, ":") + for n, dir := range dirs { + dirs[n] = sanitizePath(dir) + if _, err := os.Stat(dirs[n]); err != nil { + errs = multierror.Append(errs, fmt.Errorf("unreachable folder %s for overlayfs mount: %w", dir, err)) + } + } + mounts[i].Options[j] = prefix + strings.Join(dirs, ":") + } } + + log.Debugf("Sanitized overlayfs mount options to %s", strings.Join(mounts[i].Options, ",")) + } + + if errs != nil { + log.Warnf("Unreachable path detected in mounts for image %s: %s", imageID, errs.Error()) } } + return mounts, func(ctx context.Context) error { ctx = namespaces.WithNamespace(ctx, namespace) if err := cleanSnapshot(ctx); err != nil { @@ -474,6 +493,14 @@ func (c *ContainerdUtil) getMounts(ctx context.Context, expiration time.Duration }, nil } +func sanitizePath(path string) string { + if index := strings.Index(path, "/var/lib"); index != -1 { + return "/host" + path[index:] + } + + return path +} + // Mounts returns the mounts for an image func (c *ContainerdUtil) Mounts(ctx context.Context, expiration time.Duration, namespace string, img containerd.Image) ([]mount.Mount, error) { mounts, clean, err := c.getMounts(ctx, expiration, namespace, img) diff --git a/pkg/util/crashreport/crashreport.go b/pkg/util/crashreport/crashreport.go index 0bab9ca167051..8c04cdbb6c11c 100644 --- a/pkg/util/crashreport/crashreport.go +++ b/pkg/util/crashreport/crashreport.go @@ -115,6 +115,13 @@ func (wcr *WinCrashReporter) CheckForCrash() (*probe.WinCrashStatus, error) { if !ok { return nil, fmt.Errorf("Raw data has incorrect type") } + + // Crash dump processing is not done yet, nothing to send at the moment. Try later. 
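That "try later" contract is worth spelling out: while the system probe is still processing a dump, the check reports a busy status and the caller returns neither data nor an error, so it simply probes again on its next scheduled run — see the `WinCrashStatusCodeBusy` branch that follows. A hedged sketch of the caller's side, with illustrative status codes rather than the real `probe` constants:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// Illustrative status codes mirroring the WinCrashStatusCode* values below.
const (
	statusBusy = iota
	statusDone
	statusFailed
)

type crashStatus struct {
	code int
	err  string
}

// checkForCrash returns (nil, nil) while processing is still busy, so the
// caller tries again on its next scheduled check instead of failing.
func checkForCrash(fetch func() crashStatus) (*crashStatus, error) {
	crash := fetch()
	switch crash.code {
	case statusBusy:
		return nil, nil // not done yet: nothing to send, try later
	case statusFailed:
		return nil, errors.New("error getting crash data: " + crash.err)
	default:
		return &crash, nil
	}
}

func main() {
	start := time.Now()
	fetch := func() crashStatus {
		if time.Since(start) < 10*time.Millisecond {
			return crashStatus{code: statusBusy}
		}
		return crashStatus{code: statusDone}
	}
	for {
		crash, err := checkForCrash(fetch)
		if err != nil {
			panic(err)
		}
		if crash != nil {
			fmt.Println("crash dump processed")
			return
		}
		time.Sleep(5 * time.Millisecond) // next scheduled check
	}
}
```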
+ if crash.StatusCode == probe.WinCrashStatusCodeBusy { + log.Infof("Crash dump processing is busy") + return nil, nil + } + /* * originally did this with a sync.once. The problem is the check is run prior to the * system probe being successfully started. This is OK; we just need to detect the BSOD * we don't need to run any more */ wcr.hasRunOnce = true - if !crash.Success { + if crash.StatusCode == probe.WinCrashStatusCodeFailed { return nil, fmt.Errorf("Error getting crash data %s", crash.ErrString) } diff --git a/pkg/util/kubernetes/kubelet/types_kubelet.go b/pkg/util/kubernetes/kubelet/types_kubelet.go index 8cfceb4b8a451..28741633a172a 100644 --- a/pkg/util/kubernetes/kubelet/types_kubelet.go +++ b/pkg/util/kubernetes/kubelet/types_kubelet.go @@ -91,13 +91,14 @@ type ResourceName string // Resources name const ( - ResourceCPU ResourceName = "cpu" // Kubernetes GPU resource types by vendor as shown below // https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/ - ResourceNvidiaGPU ResourceName = "nvidia.com/gpu" - ResourceAMDGPU ResourceName = "amd.com/gpu" - ResourceIntelGPUi915 ResourceName = "gpu.intel.com/i915" - ResourceIntelGPUxe ResourceName = "gpu.intel.com/xe" + ResourceGenericNvidiaGPU ResourceName = "nvidia.com/gpu" + ResourcePrefixNvidiaMIG ResourceName = "nvidia.com/mig" + ResourcePrefixIntelGPU ResourceName = "gpu.intel.com/" + ResourcePrefixAMDGPU ResourceName = "amd.com/" + + ResourceCPU ResourceName = "cpu" ResourceMemory ResourceName = "memory" ResourceStorage ResourceName = "storage" ResourceEphemeralStorage ResourceName = "ephemeral-storage" @@ -105,7 +106,7 @@ const ( // GetGPUResourceNames returns the list of GPU resource names func GetGPUResourceNames() []ResourceName { - return []ResourceName{ResourceNvidiaGPU, ResourceAMDGPU, ResourceIntelGPUi915, ResourceIntelGPUxe} + return []ResourceName{ResourcePrefixNvidiaMIG, ResourceGenericNvidiaGPU, ResourcePrefixIntelGPU, ResourcePrefixAMDGPU} } // ResourceList is the type of fields in Pod.Spec.Containers.Resources diff --git a/pkg/util/trivy/trivy.go b/pkg/util/trivy/trivy.go index 385068d0518aa..a4fdb87e79c57 100644 --- a/pkg/util/trivy/trivy.go +++ b/pkg/util/trivy/trivy.go @@ -318,6 +318,7 @@ func (c *Collector) ScanContainerdImageFromSnapshotter(ctx context.Context, imgM if err != nil { return nil, fmt.Errorf("unable to get mounts for image %s, err: %w", imgMeta.ID, err) } + layers := extractLayersFromOverlayFSMounts(mounts) if len(layers) == 0 { return nil, fmt.Errorf("unable to extract layers from overlayfs mounts %+v for image %s", mounts, imgMeta.ID) diff --git a/releasenotes/notes/autodiscovery-add-check_tag_cardinality-2dae869a081bb4e5.yaml b/releasenotes/notes/autodiscovery-add-check_tag_cardinality-2dae869a081bb4e5.yaml new file mode 100644 index 0000000000000..947d4787582cd --- /dev/null +++ b/releasenotes/notes/autodiscovery-add-check_tag_cardinality-2dae869a081bb4e5.yaml @@ -0,0 +1,22 @@ +--- +features: + - | + Add the `check_tag_cardinality` parameter to check configurations. + + By default `check_tag_cardinality` is not set, which doesn't change the behavior of the checks. + Once it is set in pod annotations, it overrides the cardinality value provided in the base agent configuration.
+ Example of usage: + ```yaml + ad.datadoghq.com/redis.checks: | + { + "redisdb": { + "check_tag_cardinality": "high", + "instances": [ + { + "host": "%%host%%", + "port": "6379" + } + ] + } + } + ``` \ No newline at end of file diff --git a/releasenotes/notes/fix-configurable-host-tags-c71fb5597d20bffe.yaml b/releasenotes/notes/fix-configurable-host-tags-c71fb5597d20bffe.yaml new file mode 100644 index 0000000000000..02b6d7d69f2c1 --- /dev/null +++ b/releasenotes/notes/fix-configurable-host-tags-c71fb5597d20bffe.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fixes host tags with a configurable duration so the metric's context hash doesn't change, preventing the aggregator from mistaking it for a new metric. diff --git a/releasenotes/notes/kube-gpu-container-tag-38d7894664964220.yaml b/releasenotes/notes/kube-gpu-container-tag-38d7894664964220.yaml index 9f2ef8117177e..d4924025d3040 100644 --- a/releasenotes/notes/kube-gpu-container-tag-38d7894664964220.yaml +++ b/releasenotes/notes/kube-gpu-container-tag-38d7894664964220.yaml @@ -8,4 +8,4 @@ --- features: - | - Introduce new Kubernetes container tag `kube_gpu_type` for the GPU resource requested by a container. + Introduce new Kubernetes tag `gpu_vendor` for the GPU resource requested by a container. diff --git a/releasenotes/notes/oracle-reconnect-on-connection-lose-before-select-54d2b6ad6811d1ea.yaml b/releasenotes/notes/oracle-reconnect-on-connection-lose-before-select-54d2b6ad6811d1ea.yaml new file mode 100644 index 0000000000000..90ba69098fce0 --- /dev/null +++ b/releasenotes/notes/oracle-reconnect-on-connection-lose-before-select-54d2b6ad6811d1ea.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fix a nil pointer error on Oracle DBM queries when the check's connection is lost before the SELECT statement executes. diff --git a/releasenotes/notes/remove-enable_http_stats_by_status_code-aa4df7b8f7a82ccc.yaml b/releasenotes/notes/remove-enable_http_stats_by_status_code-aa4df7b8f7a82ccc.yaml new file mode 100644 index 0000000000000..f42128ca1eb74 --- /dev/null +++ b/releasenotes/notes/remove-enable_http_stats_by_status_code-aa4df7b8f7a82ccc.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +deprecations: + - | + The feature flag `service_monitoring_config.enable_http_stats_by_status_code` has been deprecated and removed. + This has no impact on USM's behavior.
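Two asides on changes earlier in this diff. First, the kubelet resource-name change: exact GPU resource names are replaced by vendor prefixes, so variants such as NVIDIA MIG slices (for example `nvidia.com/mig-1g.5gb`) still count as GPU requests and can feed the `gpu_vendor` tag. A sketch of how a consumer might match against that mixed list of one exact name plus prefixes — the helper is hypothetical, not code from this diff:

```go
package main

import (
	"fmt"
	"strings"
)

// Mirrors the constants introduced above: one exact name plus vendor prefixes.
const (
	resourceGenericNvidiaGPU = "nvidia.com/gpu"
	resourcePrefixNvidiaMIG  = "nvidia.com/mig"
	resourcePrefixIntelGPU   = "gpu.intel.com/"
	resourcePrefixAMDGPU     = "amd.com/"
)

// isGPUResource is a hypothetical consumer-side helper: exact match for the
// generic NVIDIA name, prefix match for everything else.
func isGPUResource(name string) bool {
	if name == resourceGenericNvidiaGPU {
		return true
	}
	for _, prefix := range []string{resourcePrefixNvidiaMIG, resourcePrefixIntelGPU, resourcePrefixAMDGPU} {
		if strings.HasPrefix(name, prefix) {
			return true
		}
	}
	return false
}

func main() {
	for _, name := range []string{
		"nvidia.com/gpu",        // exact match
		"nvidia.com/mig-1g.5gb", // MIG slice, matched by prefix
		"gpu.intel.com/i915",    // matched by vendor prefix
		"amd.com/gpu",
		"cpu", // not a GPU resource
	} {
		fmt.Printf("%-22s -> %v\n", name, isGPUResource(name))
	}
}
```

Second, the containerd `getMounts` change: when the agent runs in a container, host paths inside overlayfs mount options (`upperdir=`, `lowerdir=`, `workdir=`) are re-rooted under `/host`, and `lowerdir` may carry several `:`-separated layers. A simplified sketch of that rewrite, with the `os.Stat` reachability checks and multierror accumulation omitted:

```go
package main

import (
	"fmt"
	"strings"
)

// sanitizePath re-roots anything under /var/lib onto the host mount,
// following the helper added in containerd_util.go.
func sanitizePath(path string) string {
	if index := strings.Index(path, "/var/lib"); index != -1 {
		return "/host" + path[index:]
	}
	return path
}

// sanitizeOverlayOptions rewrites every directory in upperdir=/lowerdir=/workdir=
// options, preserving the ":"-separated layer list.
func sanitizeOverlayOptions(options []string) []string {
	for i, opt := range options {
		for _, prefix := range []string{"upperdir=", "lowerdir=", "workdir="} {
			if strings.HasPrefix(opt, prefix) {
				dirs := strings.Split(strings.TrimPrefix(opt, prefix), ":")
				for n, dir := range dirs {
					dirs[n] = sanitizePath(dir)
				}
				options[i] = prefix + strings.Join(dirs, ":")
			}
		}
	}
	return options
}

func main() {
	opts := []string{
		"index=off",
		"lowerdir=/var/lib/containerd/l/1:/var/lib/containerd/l/2",
		"upperdir=/var/lib/containerd/u",
	}
	fmt.Println(strings.Join(sanitizeOverlayOptions(opts), ","))
	// index=off,lowerdir=/host/var/lib/containerd/l/1:/host/var/lib/containerd/l/2,upperdir=/host/var/lib/containerd/u
}
```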
diff --git a/releasenotes/notes/system-probe-vlan-2aea22b74ae91c32.yaml b/releasenotes/notes/system-probe-vlan-2aea22b74ae91c32.yaml new file mode 100644 index 0000000000000..a0937fec1c607 --- /dev/null +++ b/releasenotes/notes/system-probe-vlan-2aea22b74ae91c32.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fixes an error in system-probe triggered by packet capture in environments with multiple VLANs. diff --git a/releasenotes/notes/windowscrashreportasync-1b2c77f9ebeafdd5.yaml b/releasenotes/notes/windowscrashreportasync-1b2c77f9ebeafdd5.yaml new file mode 100644 index 0000000000000..16552ffd1708b --- /dev/null +++ b/releasenotes/notes/windowscrashreportasync-1b2c77f9ebeafdd5.yaml @@ -0,0 +1,14 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + On Windows, the endpoint /windows_crash_detection/check has been modified to report crashes in + an asynchronous manner, to allow processing of large crash dumps without blocking or timing out. + The first check will return a busy status and continue to do so until the processing is completed. + diff --git a/tasks/__init__.py b/tasks/__init__.py index 9b061ac2cdbcd..c1457e608368d 100644 --- a/tasks/__init__.py +++ b/tasks/__init__.py @@ -9,6 +9,7 @@ from tasks import ( agent, + ami, bench, buildimages, cluster_agent, @@ -155,6 +156,7 @@ # add namespaced tasks to the root ns.add_collection(agent) +ns.add_collection(ami) ns.add_collection(buildimages) ns.add_collection(cluster_agent) ns.add_collection(cluster_agent_cloudfoundry) diff --git a/tasks/ami.py b/tasks/ami.py new file mode 100644 index 0000000000000..0654f24a63672 --- /dev/null +++ b/tasks/ami.py @@ -0,0 +1,98 @@ +import os + +from invoke import task + + +@task +def launch_instance(_ctx, ami_id, key_name, instance_type='t2.medium'): + """ + Launch an instance from an AMI. 
+ + Example: + Run: aws-vault exec sso-agent-qa-account-admin -- deva ami.launch-instance --ami-id ami-0eef9d92ec044bc94 --key-name your-key-name + Then: ssh -i ~/.ssh/your-key.pem user@ip + """ + import boto3 + + ec2 = boto3.client('ec2') + response = ec2.run_instances( + ImageId=ami_id, + InstanceType=instance_type, + KeyName=key_name, + MaxCount=1, + MinCount=1, + NetworkInterfaces=[ + { + "SubnetId": "subnet-0f1ca3e929eb3fb8b", + "DeviceIndex": 0, + "AssociatePublicIpAddress": False, + "Groups": ["sg-070023ab71cadf760"], + }, + ], + TagSpecifications=[ + { + 'ResourceType': 'instance', + 'Tags': [ + {'Key': 'Name', 'Value': f"dd-agent-{os.environ.get('USER', os.environ.get('USERNAME'))}-{ami_id}"}, + ], + }, + ], + ) + + instance_id = response['Instances'][0]['InstanceId'] + print(f"Instance {instance_id} launched from AMI {ami_id}") + print(f"IP address: {response['Instances'][0]['PrivateIpAddress']}") + + +@task +def create_ami(_ctx, instance_id, ami_name, origin_ami, os, usage="test-ami"): + """ + Create an AMI from a running instance. + + Example: aws-vault exec sso-agent-qa-account-admin -- deva ami.create-ami --instance-id i-054d463dee21bd56f --ami-name test-ami --origin-ami ami-0eef9d92ec044bc94 --os debian-12-x86_64 + """ + import boto3 + + ec2 = boto3.client('ec2') + response = ec2.create_image( + InstanceId=instance_id, + Name=ami_name, + TagSpecifications=[ + { + 'ResourceType': 'image', + 'Tags': [ + { + 'Key': 'Usage', + 'Value': usage, + }, + { + 'Key': 'OriginAmi', + 'Value': origin_ami, + }, + { + 'Key': 'OS', + 'Value': os, # <os>-<version>-<arch>, ie: debian-12-x86_64 + }, + ], + }, + ], + ) + + ami_id = response['ImageId'] + print(f"AMI {ami_id} created from instance {instance_id}") + + +@task +def delete_ami(_ctx, ami_id): + """ + Delete an AMI.
+ + Example: aws-vault exec sso-agent-qa-account-admin -- deva ami.delete-ami --ami-id ami-0890dd73c014b3a84 + """ + import boto3 + + ec2 = boto3.client('ec2') + ec2.deregister_image( + ImageId=ami_id, + ) + print(f"AMI {ami_id} deleted") diff --git a/tasks/buildimages.py b/tasks/buildimages.py index 6d679d631a1e5..38078c8aa64c6 100644 --- a/tasks/buildimages.py +++ b/tasks/buildimages.py @@ -4,7 +4,8 @@ from invoke import Context, task -from tasks.pipeline import update_circleci_config, update_gitlab_config, update_test_infra_def +from tasks.libs.ciproviders.circleci import update_circleci_config +from tasks.libs.ciproviders.gitlab_api import update_gitlab_config, update_test_infra_def @task( diff --git a/tasks/github_tasks.py b/tasks/github_tasks.py index 63e9b39e50f86..f77728a610ff1 100644 --- a/tasks/github_tasks.py +++ b/tasks/github_tasks.py @@ -22,6 +22,7 @@ from tasks.libs.common.datadog_api import create_gauge, send_metrics from tasks.libs.common.junit_upload_core import repack_macos_junit_tar from tasks.libs.common.utils import get_git_pretty_ref +from tasks.libs.owners.linter import codeowner_has_orphans, directory_has_packages_without_owner from tasks.libs.owners.parsing import read_owners from tasks.libs.pipeline.notifications import GITHUB_SLACK_MAP from tasks.release import _get_release_json_value @@ -126,48 +127,29 @@ def trigger_macos( @task -def lint_codeowner(_): +def lint_codeowner(_, owners_file=".github/CODEOWNERS"): """ - Check every package in `pkg` has an owner + Run multiple checks on the provided CODEOWNERS file """ base = os.path.dirname(os.path.abspath(__file__)) root_folder = os.path.join(base, "..") os.chdir(root_folder) - owners = _get_code_owners(root_folder) + exit_code = 0 - # make sure each root package has an owner - pkgs_without_owner = _find_packages_without_owner(owners, "pkg") - if len(pkgs_without_owner) > 0: - raise Exit( - f'The following packages in `pkg` directory don\'t have an owner in CODEOWNERS: {pkgs_without_owner}', - code=1, - ) + # Getting GitHub CODEOWNER file content + owners = read_owners(owners_file) + + # Define linters + linters = [directory_has_packages_without_owner, codeowner_has_orphans] + # Execute linters + for linter in linters: + if linter(owners): + exit_code = 1 -def _find_packages_without_owner(owners, folder): - pkg_without_owners = [] - for x in os.listdir(folder): - path = os.path.join("/" + folder, x) - if path not in owners: - pkg_without_owners.append(path) - return pkg_without_owners - - -def _get_code_owners(root_folder): - code_owner_path = os.path.join(root_folder, ".github", "CODEOWNERS") - owners = {} - with open(code_owner_path) as f: - for line in f: - line = line.strip() - line = line.split("#")[0] # remove comment - if len(line) > 0: - parts = line.split() - path = os.path.normpath(parts[0]) - # example /tools/retry_file_dump ['@DataDog/agent-metrics-logs'] - owners[path] = parts[1:] - return owners + raise Exit(code=exit_code) @task diff --git a/tasks/kernel_matrix_testing/compiler.py b/tasks/kernel_matrix_testing/compiler.py index cc093a0cdc795..d5c94b629c653 100644 --- a/tasks/kernel_matrix_testing/compiler.py +++ b/tasks/kernel_matrix_testing/compiler.py @@ -11,8 +11,8 @@ from invoke.runners import Result from tasks.kernel_matrix_testing.tool import Exit, info, warn +from tasks.libs.ciproviders.gitlab_api import GitlabYamlLoader from tasks.libs.types.arch import ARCH_AMD64, ARCH_ARM64, Arch -from tasks.pipeline import GitlabYamlLoader if TYPE_CHECKING: from tasks.kernel_matrix_testing.types import 
PathOrStr diff --git a/tasks/kernel_matrix_testing/download.py b/tasks/kernel_matrix_testing/download.py index 08bd014efd9d2..880789f06796e 100644 --- a/tasks/kernel_matrix_testing/download.py +++ b/tasks/kernel_matrix_testing/download.py @@ -130,7 +130,7 @@ def download_rootfs( branch = branch_mapping.get(f, "master") info(f"[+] {f} needs to be downloaded, using branch {branch}") filename = f"{f}.xz" - sum_file = f"{f}.sum" + sum_file = f"{f}.xz.sum" wo_qcow2 = '.'.join(f.split('.')[:-1]) manifest_file = f"{wo_qcow2}.manifest" # remove this file and sum, uncompressed file too if it exists diff --git a/tasks/kernel_matrix_testing/platforms.py b/tasks/kernel_matrix_testing/platforms.py index 1960d60b0756e..086ef4463a6f7 100644 --- a/tasks/kernel_matrix_testing/platforms.py +++ b/tasks/kernel_matrix_testing/platforms.py @@ -8,7 +8,7 @@ from tasks.kernel_matrix_testing.tool import Exit from tasks.kernel_matrix_testing.vars import KMT_SUPPORTED_ARCHS -from tasks.pipeline import GitlabYamlLoader +from tasks.libs.ciproviders.gitlab_api import GitlabYamlLoader if TYPE_CHECKING: from tasks.kernel_matrix_testing.types import ( diff --git a/tasks/kernel_matrix_testing/vmconfig.py b/tasks/kernel_matrix_testing/vmconfig.py index 64285d52b3f6a..7590a8513a31d 100644 --- a/tasks/kernel_matrix_testing/vmconfig.py +++ b/tasks/kernel_matrix_testing/vmconfig.py @@ -13,7 +13,7 @@ from tasks.kernel_matrix_testing.kmt_os import Linux, get_kmt_os from tasks.kernel_matrix_testing.platforms import filter_by_ci_component, get_platforms -from tasks.kernel_matrix_testing.stacks import check_and_get_stack, create_stack, stack_exists +from tasks.kernel_matrix_testing.stacks import check_and_get_stack, create_stack, destroy_stack, stack_exists from tasks.kernel_matrix_testing.tool import Exit, ask, convert_kmt_arch_or_local, info, warn from tasks.kernel_matrix_testing.vars import KMT_SUPPORTED_ARCHS, VMCONFIG from tasks.libs.types.arch import ARCH_AMD64, ARCH_ARM64, Arch @@ -660,7 +660,7 @@ def gen_config_for_stack( vmconfig_file = f"{get_kmt_os().stacks_dir}/{stack}/{VMCONFIG}" if os.path.exists(vmconfig_file) and not new: raise Exit( - "Editing configuration is current not supported. Destroy the stack first to change the configuration." + "Editing configuration is currently not supported. Destroy the stack first to change the configuration." ) if new or not os.path.exists(vmconfig_file): @@ -696,6 +696,7 @@ def gen_config_for_stack( if not yes and ask("are you sure you want to apply the diff? 
(y/n)") != "y": warn("[-] diff not applied") + destroy_stack(ctx, stack, False, None) return with open(vmconfig_file, "w") as f: diff --git a/tasks/kmt.py b/tasks/kmt.py index fb5ada35894a8..198dba2c3dcce 100644 --- a/tasks/kmt.py +++ b/tasks/kmt.py @@ -874,15 +874,15 @@ def build_run_config(run: str | None, packages: list[str]): c: dict[str, Any] = {} if len(packages) == 0: - return {"*": {"exclude": False}} + return {"filters": {"*": {"exclude": False}}} for p in packages: if p[:2] == "./": p = p[2:] if run is not None: - c[p] = {"run-only": [run]} + c["filters"][p] = {"run-only": [run]} else: - c[p] = {"exclude": False} + c["filters"][p] = {"exclude": False} return c diff --git a/tasks/libs/ciproviders/circleci.py b/tasks/libs/ciproviders/circleci.py new file mode 100644 index 0000000000000..68d996830e359 --- /dev/null +++ b/tasks/libs/ciproviders/circleci.py @@ -0,0 +1,16 @@ +import re + + +def update_circleci_config(file_path, image_tag, test_version): + """ + Override the agent-circleci-runner image version in the .circleci/config.yml file + """ + image_name = "gcr.io/datadoghq/agent-circleci-runner" + with open(file_path) as circle: + circle_ci = circle.read() + match = re.search(rf"({image_name}(_test_only)?):([a-zA-Z0-9_-]+)\n", circle_ci) + if not match: + raise RuntimeError(f"Impossible to find the version of image {image_name} in circleci configuration file") + image = f"{image_name}_test_only" if test_version else image_name + with open(file_path, "w") as circle: + circle.write(circle_ci.replace(f"{match.group(0)}", f"{image}:{image_tag}\n")) diff --git a/tasks/libs/ciproviders/github_api.py b/tasks/libs/ciproviders/github_api.py index 73a54248cfd73..17860e68f08c6 100644 --- a/tasks/libs/ciproviders/github_api.py +++ b/tasks/libs/ciproviders/github_api.py @@ -370,6 +370,14 @@ def create_label(self, name, color, description=""): """ return self._repository.create_label(name, color, description) + def create_release(self, tag, message, draft=True): + return self._repository.create_git_release( + tag=tag, + name=tag, + message=message, + draft=draft, + ) + def get_github_teams(users): for user in users: diff --git a/tasks/libs/ciproviders/gitlab_api.py b/tasks/libs/ciproviders/gitlab_api.py index 170e71496fc8e..7d1aa67b18049 100644 --- a/tasks/libs/ciproviders/gitlab_api.py +++ b/tasks/libs/ciproviders/gitlab_api.py @@ -120,6 +120,24 @@ def refresh_pipeline(pipeline: ProjectPipeline): pipeline.refresh() +class GitlabReference(yaml.YAMLObject): + def __init__(self, refs): + self.refs = refs + + def __repr__(self): + return f'{self.__class__.__name__}=(refs={self.refs})' + + +def reference_constructor(loader, node): + return GitlabReference(loader.construct_sequence(node)) + + +def GitlabYamlLoader(): + loader = yaml.SafeLoader + loader.add_constructor('!reference', reference_constructor) + return loader + + class GitlabCIDiff: def __init__( self, @@ -1250,3 +1268,51 @@ def full_config_get_all_stages(full_config: dict) -> set[str]: all_stages.update(config.get("stages", [])) return all_stages + + +def update_test_infra_def(file_path, image_tag): + """ + Override TEST_INFRA_DEFINITIONS_BUILDIMAGES in `.gitlab/common/test_infra_version.yml` file + """ + with open(file_path) as gl: + file_content = gl.readlines() + with open(file_path, "w") as gl: + for line in file_content: + test_infra_def = re.search(r"TEST_INFRA_DEFINITIONS_BUILDIMAGES:\s*(\w+)", line) + if test_infra_def: + gl.write(line.replace(test_infra_def.group(1), image_tag)) + else: + gl.write(line) + + +def update_gitlab_config(file_path, image_tag, 
test_version): + """ + Override variables in .gitlab-ci.yml file + """ + with open(file_path) as gl: + file_content = gl.readlines() + gitlab_ci = yaml.load("".join(file_content), Loader=GitlabYamlLoader()) + # TEST_INFRA_DEFINITION_BUILDIMAGE label format differs from other buildimages + suffixes = [ + name + for name in gitlab_ci["variables"] + if name.endswith("SUFFIX") and not name.startswith("TEST_INFRA_DEFINITION") + ] + images = [name.replace("_SUFFIX", "") for name in suffixes] + with open(file_path, "w") as gl: + for line in file_content: + if any(re.search(rf"{suffix}:", line) for suffix in suffixes): + if test_version: + gl.write(line.replace('""', '"_test_only"')) + else: + gl.write(line.replace('"_test_only"', '""')) + elif any(re.search(rf"{image}:", line) for image in images): + current_version = re.search(r"v\d+-\w+", line) + if current_version: + gl.write(line.replace(current_version.group(0), image_tag)) + else: + raise RuntimeError( + f"Unable to find a version matching the v<date>-<commit> pattern in line {line}" + ) + else: + gl.write(line) diff --git a/tasks/libs/common/utils.py b/tasks/libs/common/utils.py index 3b3afbc80c027..5fef10fd29cfc 100644 --- a/tasks/libs/common/utils.py +++ b/tasks/libs/common/utils.py @@ -532,7 +532,8 @@ def gitlab_section(section_name, collapsed=False, echo=False): """ - echo: If True, will echo the gitlab section in bold in CLI mode instead of not showing anything """ - section_id = section_name.replace(" ", "_").replace("/", "_") + # Replace every special character (" ", ":", "/", "\") that prevents section generation with "_" + section_id = re.sub(r"[ :/\\]", "_", section_name) in_ci = running_in_gitlab_ci() try: if in_ci: diff --git a/tasks/libs/owners/linter.py b/tasks/libs/owners/linter.py new file mode 100644 index 0000000000000..64023fa4b70ae --- /dev/null +++ b/tasks/libs/owners/linter.py @@ -0,0 +1,100 @@ +import os +import sys + +from tasks.libs.common.color import color_message + + +def directory_has_packages_without_owner(owners, folder="pkg"): + """Check that every package in `pkg` has an owner""" + + error = False + + for x in os.listdir(folder): + path = os.path.join("/" + folder, x) + if all(owner[1].rstrip('/') != path for owner in owners.paths): + if not error: + print( + color_message("The following packages don't have an owner in the CODEOWNERS file", "red"), file=sys.stderr + ) + error = True + print(color_message(f"\t- {path}", "orange"), file=sys.stderr) + + return error + + +def codeowner_has_orphans(owners): + """Check that every rule in the CODEOWNERS file points to an existing file/directory""" + + err_invalid_rule_path = False + err_orphans_path = False + + for rule in owners.paths: + try: + # Get the static part of the rule path, removing matching subpath (such as '*') + static_root = _get_static_root(rule[1]) + except Exception: + err_invalid_rule_path = True + print( + color_message( + f"[UNSUPPORTED] The following rule's path does not start with '/' anchor: {rule[1]}", "red" + ), + file=sys.stderr, + ) + continue + + if not _is_pattern_in_fs(static_root, rule[0]): + if not err_orphans_path: + print( + color_message( + "The following rules are outdated: they don't point to an existing file/directory", "red" + ), + file=sys.stderr, + ) + err_orphans_path = True + print(color_message(f"\t- {rule[1]}\t{rule[2]}", "orange"), file=sys.stderr) + + return err_invalid_rule_path or err_orphans_path + + +def _get_static_root(pattern): + """_get_static_root returns the longest prefix path from the pattern without any wildcards.""" + result = "." 
+ + if not pattern.startswith("/"): + raise Exception() + + # We remove the '/' anchor character from the path + pattern = pattern[1:] + + for elem in pattern.split("/"): + if '*' in elem: + return result + result = os.path.join(result, elem) + return result + + +def _is_pattern_in_fs(path, pattern): + """Checks if a given pattern matches any file within the specified path. + + Args: + path (str): The file or directory path to search within. + pattern (re.Pattern): The compiled regular expression pattern to match against file paths. + + Returns: + bool: True if the pattern matches any file path within the specified path, False otherwise. + """ + if os.path.isfile(path): + return True + elif os.path.isdir(path): + for root, _, files in os.walk(path): + # Check if root matches the pattern, without "./" at the beginning + if pattern.match(root[2:]): + return True + for name in files: + # file_path is the relative path from the root of the repo, without "./" at the beginning + file_path = os.path.join(root, name)[2:] + + # Check if the file path matches the regex pattern + if pattern.match(file_path): + return True + return False diff --git a/tasks/new_e2e_tests.py b/tasks/new_e2e_tests.py index ad5dd9e561ac5..40abe8121b4be 100644 --- a/tasks/new_e2e_tests.py +++ b/tasks/new_e2e_tests.py @@ -231,6 +231,10 @@ def cleanup_remote_stacks(ctx, stack_regex, pulumi_backend): if stack_regex.match(stack_id): to_delete_stacks.add(f"organization/e2eci/{stack_id}") + if len(to_delete_stacks) == 0: + print("No stacks to delete") + return + print("About to delete the following stacks:", to_delete_stacks) with multiprocessing.Pool(len(to_delete_stacks)) as pool: res = pool.map(destroy_remote_stack, to_delete_stacks) diff --git a/tasks/pipeline.py b/tasks/pipeline.py index 9a0ff2b3821d3..64a46e75b1117 100644 --- a/tasks/pipeline.py +++ b/tasks/pipeline.py @@ -13,14 +13,15 @@ from tasks.libs.ciproviders.github_api import GithubAPI from tasks.libs.ciproviders.gitlab_api import ( + GitlabYamlLoader, get_gitlab_bot_token, get_gitlab_repo, gitlab_configuration_is_modified, refresh_pipeline, ) from tasks.libs.common.color import Color, color_message -from tasks.libs.common.constants import DEFAULT_BRANCH, GITHUB_REPO_NAME -from tasks.libs.common.git import check_clean_branch_state, get_commit_sha, get_current_branch +from tasks.libs.common.constants import DEFAULT_BRANCH +from tasks.libs.common.git import get_commit_sha, get_current_branch from tasks.libs.common.utils import ( get_all_allowed_repo_branches, is_allowed_repo_branch, @@ -40,24 +41,6 @@ BOT_NAME = "github-actions[bot]" -class GitlabReference(yaml.YAMLObject): - def __init__(self, refs): - self.refs = refs - - def __repr__(self): - return f'{self.__class__.__name__}=(refs={self.refs}' - - -def reference_constructor(loader, node): - return GitlabReference(loader.construct_sequence(node)) - - -def GitlabYamlLoader(): - loader = yaml.SafeLoader - loader.add_constructor('!reference', reference_constructor) - return loader - - # Tasks to trigger pipelines @@ -756,86 +739,7 @@ def update_buildimages(ctx, image_tag, test_version=True, branch_name=None): Update local files to run with new image_tag from agent-buildimages and launch a full pipeline Use --no-test-version to commit without the _test_only suffixes """ - create_branch = branch_name is None - branch_name = verify_workspace(ctx, branch_name=branch_name) - update_gitlab_config(".gitlab-ci.yml", image_tag, test_version=test_version) - update_circleci_config(".circleci/config.yml", 
image_tag, test_version=test_version) - trigger_build(ctx, branch_name=branch_name, create_branch=create_branch) - - -def verify_workspace(ctx, branch_name=None): - """ - Assess we can modify files and commit without risk of local or upstream conflicts - """ - if branch_name is None: - user_name = ctx.run("whoami", hide="out") - branch_name = f"{user_name.stdout.rstrip()}/test_buildimages" - github = GithubAPI(repository=GITHUB_REPO_NAME) - check_clean_branch_state(ctx, github, branch_name) - return branch_name - - -def update_test_infra_def(file_path, image_tag): - """ - Override TEST_INFRA_DEFINITIONS_BUILDIMAGES in `.gitlab/common/test_infra_version.yml` file - """ - with open(file_path) as gl: - file_content = gl.readlines() - with open(file_path, "w") as gl: - for line in file_content: - test_infra_def = re.search(r"TEST_INFRA_DEFINITIONS_BUILDIMAGES:\s*(\w+)", line) - if test_infra_def: - gl.write(line.replace(test_infra_def.group(1), image_tag)) - else: - gl.write(line) - - -def update_gitlab_config(file_path, image_tag, test_version): - """ - Override variables in .gitlab-ci.yml file - """ - with open(file_path) as gl: - file_content = gl.readlines() - gitlab_ci = yaml.load("".join(file_content), Loader=GitlabYamlLoader()) - # TEST_INFRA_DEFINITION_BUILDIMAGE label format differs from other buildimages - suffixes = [ - name - for name in gitlab_ci["variables"] - if name.endswith("SUFFIX") and not name.startswith("TEST_INFRA_DEFINITION") - ] - images = [name.replace("_SUFFIX", "") for name in suffixes] - with open(file_path, "w") as gl: - for line in file_content: - if any(re.search(rf"{suffix}:", line) for suffix in suffixes): - if test_version: - gl.write(line.replace('""', '"_test_only"')) - else: - gl.write(line.replace('"_test_only"', '""')) - elif any(re.search(rf"{image}:", line) for image in images): - current_version = re.search(r"v\d+-\w+", line) - if current_version: - gl.write(line.replace(current_version.group(0), image_tag)) - else: - raise RuntimeError( - f"Unable to find a version matching the v- pattern in line {line}" - ) - else: - gl.write(line) - - -def update_circleci_config(file_path, image_tag, test_version): - """ - Override variables in .gitlab-ci.yml file - """ - image_name = "gcr.io/datadoghq/agent-circleci-runner" - with open(file_path) as circle: - circle_ci = circle.read() - match = re.search(rf"({image_name}(_test_only)?):([a-zA-Z0-9_-]+)\n", circle_ci) - if not match: - raise RuntimeError(f"Impossible to find the version of image {image_name} in circleci configuration file") - image = f"{image_name}_test_only" if test_version else image_name - with open(file_path, "w") as circle: - circle.write(circle_ci.replace(f"{match.group(0)}", f"{image}:{image_tag}\n")) + raise Exit(f"This invoke task is {color_message('deprecated', 'red')}, please use inv buildimages.update instead.") @task( @@ -859,22 +763,6 @@ def get_gitlab_config_image_tag(_, file_path=".gitlab-ci.yml"): return gitlab_ci["variables"]["DATADOG_AGENT_BUILDIMAGES"] -def trigger_build(ctx, branch_name=None, create_branch=False): - """ - Trigger a pipeline from current branch on-demand (useful for test image) - """ - if create_branch: - ctx.run(f"git checkout -b {branch_name}") - answer = input("Do you want to trigger a pipeline (will also commit and push)? 
[Y/n]\n") - if len(answer) == 0 or answer.casefold() == "y": - ctx.run("git add .gitlab-ci.yml .circleci/config.yml") - ctx.run("git commit -m 'Update buildimages version'") - ctx.run(f"git push origin {branch_name}") - print("Wait 10s to let Gitlab create the first events before triggering a new pipeline") - time.sleep(10) - run(ctx, here=True) - - @task( help={ 'owner-branch-name': 'Owner and branch names in the format /', diff --git a/tasks/release.py b/tasks/release.py index 57ed76ba1671f..bb7a232834f0b 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -1049,3 +1049,55 @@ def create_qa_cards(ctx, tag): return setup_ddqa(ctx) ctx.run(f"ddqa --auto create {version.previous_rc_version()} {tag} {get_labels(version)}") + + +@task +def create_github_release(_ctx, version, draft=True): + """ + Create a GitHub release for the given tag. + """ + import pandoc + + sections = ( + ("Agent", "CHANGELOG.rst"), + ("Datadog Cluster Agent", "CHANGELOG-DCA.rst"), + ) + + notes = [] + + for section, filename in sections: + text = pandoc.write(pandoc.read(file=filename), format="markdown_strict", options=["--wrap=none"]) + + header_found = False + lines = [] + + # Extract the section for the given version + for line in text.splitlines(): + # Move to the right section + if line.startswith("## " + version): + header_found = True + continue + + if header_found: + # Next version found, stop + if line.startswith("## "): + break + lines.append(line) + + # if we found the header, add the section to the final release note + if header_found: + notes.append(f"# {section}") + notes.extend(lines) + + if not notes: + print(f"No release notes found for {version}") + raise Exit(code=1) + + github = GithubAPI() + release = github.create_release( + version, + "\n".join(notes), + draft=draft, + ) + + print(f"Link to the release note: {release.html_url}") diff --git a/tasks/requirements_release_tasks.txt b/tasks/requirements_release_tasks.txt index 684ee2cb6cbfa..f4159a5204ad5 100644 --- a/tasks/requirements_release_tasks.txt +++ b/tasks/requirements_release_tasks.txt @@ -1,3 +1,4 @@ atlassian-python-api==3.41.3 yattag==1.15.2 reno==3.5.0 +pandoc==2.4 diff --git a/tasks/testwasher.py b/tasks/testwasher.py index 96eac587e403b..ac3623b468a09 100644 --- a/tasks/testwasher.py +++ b/tasks/testwasher.py @@ -189,6 +189,8 @@ def generate_flake_finder_pipeline(ctx, n=3, generate_config=False): and new_job['variables']['E2E_COMMIT_SHA'] == "$CI_COMMIT_SHA" ): new_job['variables']['E2E_COMMIT_SHA'] = "$PARENT_COMMIT_SHA" + if 'E2E_PRE_INITIALIZED' in new_job['variables']: + del new_job['variables']['E2E_PRE_INITIALIZED'] new_job["rules"] = [{"when": "always"}] new_job["needs"] = ["go_e2e_deps"] new_jobs[f"{job}-{i}"] = new_job diff --git a/tasks/unit_tests/circleci_tests.py b/tasks/unit_tests/circleci_tests.py new file mode 100644 index 0000000000000..73612edd8abc1 --- /dev/null +++ b/tasks/unit_tests/circleci_tests.py @@ -0,0 +1,45 @@ +import shutil +import subprocess +import unittest +from pathlib import Path + +import yaml + +from tasks.libs.ciproviders.circleci import update_circleci_config + + +class TestUpdateCircleCI(unittest.TestCase): + circleci_file = ".circleci/config.yml" + circleci_test = ".circleci/config-test.yml" + erroneous_file = "tasks/unit_tests/testdata/erroneous_circleci_config.yml" + + def setUp(self) -> None: + shutil.copy(self.circleci_file, self.circleci_test) + return super().setUp() + + def tearDown(self) -> None: + subprocess.run(f"git checkout -- {self.erroneous_file}".split()) + 
Path(self.circleci_test).unlink()
+        return super().tearDown()
+
+    def test_nominal(self):
+        update_circleci_config(self.circleci_test, "1m4g3", test_version=True)
+        with open(self.circleci_test) as gl:
+            circle_ci = yaml.safe_load(gl)
+        full_image = circle_ci['templates']['job_template']['docker'][0]['image']
+        image, version = full_image.split(":")
+        self.assertTrue(image.endswith("_test_only"))
+        self.assertEqual("1m4g3", version)
+
+    def test_update_no_test(self):
+        update_circleci_config(self.circleci_test, "1m4g3", test_version=False)
+        with open(self.circleci_test) as gl:
+            circle_ci = yaml.safe_load(gl)
+        full_image = circle_ci['templates']['job_template']['docker'][0]['image']
+        image, version = full_image.split(":")
+        self.assertFalse(image.endswith("_test_only"))
+        self.assertEqual("1m4g3", version)
+
+    def test_raise(self):
+        with self.assertRaises(RuntimeError):
+            update_circleci_config(self.erroneous_file, "1m4g3", test_version=False)
diff --git a/tasks/unit_tests/codeowner_linter_tests.py b/tasks/unit_tests/codeowner_linter_tests.py
new file mode 100644
index 0000000000000..3be67647eef3e
--- /dev/null
+++ b/tasks/unit_tests/codeowner_linter_tests.py
@@ -0,0 +1,42 @@
+import os
+import shutil
+import tempfile
+import unittest
+
+from codeowners import CodeOwners
+
+from tasks.libs.owners.linter import codeowner_has_orphans, directory_has_packages_without_owner
+
+
+class TestCodeownerLinter(unittest.TestCase):
+    def setUp(self):
+        self.test_dir = tempfile.mkdtemp()
+        self.fake_pkgs = ["fake_a", "fake_b", "fake_c"]
+        self.pkg_dir = os.path.join(self.test_dir, "pkg")
+        self.backup_cwd = os.getcwd()
+
+        # Create pkgs dir
+        os.makedirs(self.pkg_dir)
+        for pkg in self.fake_pkgs:
+            os.makedirs(os.path.join(self.pkg_dir, pkg))
+
+        os.chdir(self.test_dir)
+
+    def tearDown(self):
+        os.chdir(self.backup_cwd)
+        shutil.rmtree(self.test_dir)
+
+    def test_all_pkg_have_codeowner(self):
+        codeowner = CodeOwners("\n".join("/pkg/" + pkg for pkg in self.fake_pkgs))
+        self.assertFalse(directory_has_packages_without_owner(codeowner))
+        self.assertFalse(codeowner_has_orphans(codeowner))
+
+    def test_pkg_is_missing_codeowner(self):
+        codeowner = CodeOwners("\n".join(os.path.join("/pkg/", pkg) for pkg in self.fake_pkgs[:-1]))
+        self.assertTrue(directory_has_packages_without_owner(codeowner))
+        self.assertFalse(codeowner_has_orphans(codeowner))
+
+    def test_codeowner_rule_is_outdated(self):
+        codeowner = CodeOwners("\n".join(os.path.join("/pkg/", pkg) for pkg in [*self.fake_pkgs, "old_deleted_pkg"]))
+        self.assertFalse(directory_has_packages_without_owner(codeowner))
+        self.assertTrue(codeowner_has_orphans(codeowner))
diff --git a/tasks/unit_tests/gitlab_api_tests.py b/tasks/unit_tests/gitlab_api_tests.py
index 5f2add57d44f8..781777f2037d2 100644
--- a/tasks/unit_tests/gitlab_api_tests.py
+++ b/tasks/unit_tests/gitlab_api_tests.py
@@ -1,7 +1,9 @@
+import subprocess
 import unittest
 from collections import OrderedDict
 from unittest.mock import MagicMock, patch
 
+import yaml
 from invoke import MockContext, Result
 
 from tasks.libs.ciproviders.gitlab_api import (
@@ -13,6 +15,7 @@
     gitlab_configuration_is_modified,
     read_includes,
     retrieve_all_paths,
+    update_gitlab_config,
 )
 
 
@@ -478,3 +481,33 @@ def test_two_modified_files(self):
         diff = f'diff --git a/{file} b/{file}\nindex 561eb1a201..5e43218090 100644\n--- a/{file}\n+++ b/{file}\n@@ -1,4 +1,11 @@\n ---\n+rtloader_tests:\n+  stage: source_test\n+  needs: ["go_deps"]\n+  before_script:\n+    - source /root/.bashrc && conda activate $CONDA_ENV\n+  script: ["# Skipping go tests"]\n+\n nerd_tests\n   stage: source_test\n   needs: ["go_deps"]\ndiff --git a/{yaml} b/{yaml}\nindex 561eb1a201..5e43218090 100644\n--- a/{yaml}\n+++ b/{yaml}\n@@ -1,4 +1,11 @@\n ---\n+rtloader_tests:\n+  stage: source_test\n+  noods: ["go_deps"]\n+  before_script:\n+    - source /root/.bashrc && conda activate $CONDA_ENV\n+  script: ["# Skipping go tests"]\n+\n nerd_tests\n   stage: source_test\n   needs: ["go_deps"]'
         c = MockContext(run={"git diff HEAD^1..HEAD": Result(diff)})
         self.assertTrue(gitlab_configuration_is_modified(c))
+
+
+class TestUpdateGitlabCI(unittest.TestCase):
+    gitlabci_file = "tasks/unit_tests/testdata/fake_gitlab-ci.yml"
+    erroneous_file = "tasks/unit_tests/testdata/erroneous_gitlab-ci.yml"
+
+    def tearDown(self) -> None:
+        subprocess.run(f"git checkout -- {self.gitlabci_file} {self.erroneous_file}".split())
+        return super().tearDown()
+
+    def test_nominal(self):
+        update_gitlab_config(self.gitlabci_file, "1mageV3rsi0n", test_version=True)
+        with open(self.gitlabci_file) as gl:
+            gitlab_ci = yaml.safe_load(gl)
+        for variable, value in gitlab_ci["variables"].items():
+            # TEST_INFRA_DEFINITION_BUILDIMAGE label format differs from other buildimages
+            if variable.endswith("_SUFFIX") and not variable.startswith("TEST_INFRA_DEFINITION"):
+                self.assertEqual("_test_only", value)
+
+    def test_update_no_test(self):
+        update_gitlab_config(self.gitlabci_file, "1mageV3rsi0n", test_version=False)
+        with open(self.gitlabci_file) as gl:
+            gitlab_ci = yaml.safe_load(gl)
+        for variable, value in gitlab_ci["variables"].items():
+            if variable.endswith("_SUFFIX"):
+                self.assertEqual("", value)
+
+    def test_raise(self):
+        with self.assertRaises(RuntimeError):
+            update_gitlab_config(self.erroneous_file, "1mageV3rsi0n", test_version=False)
diff --git a/tasks/unit_tests/pipeline_tests.py b/tasks/unit_tests/pipeline_tests.py
index 366fc3b54bb92..9991b7ec32870 100644
--- a/tasks/unit_tests/pipeline_tests.py
+++ b/tasks/unit_tests/pipeline_tests.py
@@ -1,104 +1,13 @@
-import subprocess
 import unittest
 from datetime import datetime, timezone
 from unittest.mock import MagicMock, patch
 
-import yaml
 from invoke import MockContext, Result
 from invoke.exceptions import Exit
 
 from tasks import pipeline
 
 
-class TestVerifyWorkspace(unittest.TestCase):
-    @patch('tasks.pipeline.GithubAPI', autospec=True)
-    @patch('tasks.pipeline.check_clean_branch_state', new=MagicMock())
-    def test_with_branch(self, mock_gh):
-        branch_test_name = "tryphon_tournesol"
-        context_mock = MockContext(run=Result("haddock"))
-        branch = pipeline.verify_workspace(context_mock, branch_test_name)
-        self.assertEqual(branch_test_name, branch)
-        mock_gh.assert_not_called()
-
-    @patch('tasks.pipeline.GithubAPI', autospec=True)
-    @patch('tasks.pipeline.check_clean_branch_state', new=MagicMock())
-    def test_without_branch(self, mock_gh):
-        context_mock = MockContext(run=Result("haddock"))
-        branch = pipeline.verify_workspace(context_mock, None)
-        self.assertEqual("haddock/test_buildimages", branch)
-        mock_gh.assert_called()
-
-    @patch('tasks.pipeline.GithubAPI', autospec=True)
-    def test_bad_workspace(self, _):
-        with open(".gitignore", "a") as f:
-            f.write("# test comment")
-        with self.assertRaises(Exit):
-            context_mock = MockContext(run=Result("haddock"))
-            _ = pipeline.verify_workspace(context_mock)
-        subprocess.run("git checkout -- .gitignore".split())
-
-
-class TestUpdateGitlabCI(unittest.TestCase):
-    gitlabci_file = "tasks/unit_tests/testdata/fake_gitlab-ci.yml"
-    erroneous_file = "tasks/unit_tests/testdata/erroneous_gitlab-ci.yml"
-
-    def tearDown(self) -> None:
-        subprocess.run(f"git checkout -- {self.gitlabci_file} {self.erroneous_file}".split())
-        return super().tearDown()
-
-    def test_nominal(self):
-        pipeline.update_gitlab_config(self.gitlabci_file, "1mageV3rsi0n", test_version=True)
-        with open(self.gitlabci_file) as gl:
-            gitlab_ci = yaml.safe_load(gl)
-        for variable, value in gitlab_ci["variables"].items():
-            # TEST_INFRA_DEFINITION_BUILDIMAGE label format differs from other buildimages
-            if variable.endswith("_SUFFIX") and not variable.startswith("TEST_INFRA_DEFINITION"):
-                self.assertEqual("_test_only", value)
-
-    def test_update_no_test(self):
-        pipeline.update_gitlab_config(self.gitlabci_file, "1mageV3rsi0n", test_version=False)
-        with open(self.gitlabci_file) as gl:
-            gitlab_ci = yaml.safe_load(gl)
-        for variable, value in gitlab_ci["variables"].items():
-            if variable.endswith("_SUFFIX"):
-                self.assertEqual("", value)
-
-    def test_raise(self):
-        with self.assertRaises(RuntimeError):
-            pipeline.update_gitlab_config(self.erroneous_file, "1mageV3rsi0n", test_version=False)
-
-
-class TestUpdateCircleCI(unittest.TestCase):
-    circleci_file = "tasks/unit_tests/testdata/fake_circleci_config.yml"
-    erroneous_file = "tasks/unit_tests/testdata/erroneous_circleci_config.yml"
-
-    def tearDown(self) -> None:
-        subprocess.run(f"git checkout -- {self.circleci_file} {self.erroneous_file}".split())
-        return super().tearDown()
-
-    def test_nominal(self):
-        pipeline.update_circleci_config(self.circleci_file, "1m4g3", test_version=True)
-        with open(self.circleci_file) as gl:
-            circle_ci = yaml.safe_load(gl)
-        full_image = circle_ci['templates']['job_template']['docker'][0]['image']
-        image, version = full_image.split(":")
-        self.assertTrue(image.endswith("_test_only"))
-        self.assertEqual("1m4g3", version)
-
-    def test_update_no_test(self):
-        pipeline.update_circleci_config(self.circleci_file, "1m4g3", test_version=False)
-        with open(self.circleci_file) as gl:
-            circle_ci = yaml.safe_load(gl)
-        full_image = circle_ci['templates']['job_template']['docker'][0]['image']
-        image, version = full_image.split(":")
-        self.assertFalse(image.endswith("_test_only"))
-        self.assertEqual("1m4g3", version)
-
-    def test_raise(self):
-        with self.assertRaises(RuntimeError):
-            pipeline.update_circleci_config(self.erroneous_file, "1m4g3", test_version=False)
-
-
 class TestCompareToItself(unittest.TestCase):
     context = MockContext(
         run={
diff --git a/tasks/update_go.py b/tasks/update_go.py
index 548aff0374182..9794f8df5b82c 100644
--- a/tasks/update_go.py
+++ b/tasks/update_go.py
@@ -7,9 +7,10 @@
 from invoke.tasks import task
 
 from tasks.go import tidy
+from tasks.libs.ciproviders.circleci import update_circleci_config
+from tasks.libs.ciproviders.gitlab_api import update_gitlab_config
 from tasks.libs.common.color import color_message
 from tasks.modules import DEFAULT_MODULES
-from tasks.pipeline import update_circleci_config, update_gitlab_config
 
 GO_VERSION_FILE = "./.go-version"
 
diff --git a/tasks/winbuildscripts/unittests.bat b/tasks/winbuildscripts/unittests.bat
index ab7eb626ed44a..9673769c4c329 100644
--- a/tasks/winbuildscripts/unittests.bat
+++ b/tasks/winbuildscripts/unittests.bat
@@ -15,7 +15,10 @@ xcopy /e/s/h/q c:\mnt\*.*
 call %TEST_ROOT%\datadog-agent\tasks\winbuildscripts\extract-modcache.bat %TEST_ROOT%\datadog-agent modcache
 call %TEST_ROOT%\datadog-agent\tasks\winbuildscripts\extract-modcache.bat %TEST_ROOT%\datadog-agent modcache_tools
 
-Powershell -C "%TEST_ROOT%\datadog-agent\tasks\winbuildscripts\unittests.ps1" || exit /b 2
+Powershell -File "%TEST_ROOT%\datadog-agent\tasks\winbuildscripts\unittests.ps1"
+if %ERRORLEVEL% neq 0 (
+    exit /b %ERRORLEVEL%
+)
 
 goto :EOF
diff --git a/tasks/winbuildscripts/unittests.ps1 b/tasks/winbuildscripts/unittests.ps1
index 5f4fc6a848b70..5285e6c9d86b4 100644
--- a/tasks/winbuildscripts/unittests.ps1
+++ b/tasks/winbuildscripts/unittests.ps1
@@ -55,8 +55,9 @@ if($err -ne 0){
     [Environment]::Exit($err)
 }
 & inv -e test --junit-tar="$Env:JUNIT_TAR" --race --profile --rerun-fails=2 --coverage --cpus 8 --python-runtimes="$Env:PY_RUNTIMES" --python-home-2=$Env:Python2_ROOT_DIR --python-home-3=$Env:Python3_ROOT_DIR --save-result-json C:\mnt\$test_output_file $Env:EXTRA_OPTS --build-stdlib $TEST_WASHER_FLAG
-
-$err = $LASTEXITCODE
+If ($LASTEXITCODE -ne "0") {
+    exit $LASTEXITCODE
+}
 
 # Ignore upload failures
 $ErrorActionPreference = "Continue"
@@ -65,10 +66,14 @@ $tmpfile = [System.IO.Path]::GetTempFileName()
 
 # 1. Upload coverage reports to Codecov
 & "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" -parameterName "$Env:CODECOV_TOKEN" -tempFile "$tmpfile"
 If ($LASTEXITCODE -ne "0") {
-    exit $LASTEXITCODE
+    Write-Host "Failed to fetch CODECOV_TOKEN - ignoring"
+    exit "0"
 }
 $Env:CODECOV_TOKEN=$(cat "$tmpfile")
 & inv -e coverage.upload-to-codecov $Env:COVERAGE_CACHE_FLAG
+if($LASTEXITCODE -ne "0"){
+    Write-Host -ForegroundColor Red "coverage upload failed $LASTEXITCODE"
+}
 
 # 2. Upload junit files
 # Copy test files to c:\mnt for further gitlab upload
@@ -77,15 +82,15 @@ Get-ChildItem -Path "$UT_BUILD_ROOT" -Filter "junit-out-*.xml" -Recurse | ForEach-Object {
 }
 & "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" -parameterName "$Env:API_KEY_ORG2" -tempFile "$tmpfile"
 If ($LASTEXITCODE -ne "0") {
-    exit $LASTEXITCODE
+    Write-Host "Failed to fetch API_KEY - ignoring"
+    exit "0"
 }
 $Env:DATADOG_API_KEY=$(cat "$tmpfile")
 Remove-Item "$tmpfile"
 & inv -e junit-upload --tgz-path $Env:JUNIT_TAR
-if($err -ne 0){
-    Write-Host -ForegroundColor Red "test failed $err"
-    [Environment]::Exit($err)
+if($LASTEXITCODE -ne "0"){
+    Write-Host -ForegroundColor Red "junit upload failed $LASTEXITCODE"
 }
 Write-Host Test passed
\ No newline at end of file
diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod
index 8b3af875e8ab7..c9ea0996658d8 100644
--- a/test/new-e2e/go.mod
+++ b/test/new-e2e/go.mod
@@ -59,8 +59,8 @@ require (
 	// `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version
 	// Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB
 	// => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB
-	github.com/DataDog/test-infra-definitions v0.0.0-20241015101741-f694c4dc33e4
-	github.com/aws/aws-sdk-go-v2 v1.32.0
+	github.com/DataDog/test-infra-definitions v0.0.0-20241023110344-cd9a362371a8
+	github.com/aws/aws-sdk-go-v2 v1.32.2
 	github.com/aws/aws-sdk-go-v2/config v1.27.40
 	github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2
 	github.com/aws/aws-sdk-go-v2/service/eks v1.44.1
@@ -73,11 +73,11 @@ require (
 	github.com/kr/pretty v0.3.1
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/pkg/sftp v1.13.6
-	github.com/pulumi/pulumi-aws/sdk/v6 v6.54.2
-	github.com/pulumi/pulumi-awsx/sdk/v2 v2.14.0
-	github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8
+	github.com/pulumi/pulumi-aws/sdk/v6 v6.56.1
+	github.com/pulumi/pulumi-awsx/sdk/v2 v2.16.1
+	github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8 // indirect
 	github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1
-	github.com/pulumi/pulumi/sdk/v3 v3.133.0
+	github.com/pulumi/pulumi/sdk/v3 v3.137.0
github.com/samber/lo v1.47.0 github.com/stretchr/testify v1.9.0 github.com/xeipuuv/gojsonschema v1.2.0 @@ -114,12 +114,12 @@ require ( github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.38 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.19 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.19 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.19 // indirect - github.com/aws/aws-sdk-go-v2/service/ecr v1.32.4 // indirect - github.com/aws/aws-sdk-go-v2/service/ecs v1.45.2 + github.com/aws/aws-sdk-go-v2/service/ecr v1.36.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ecs v1.47.4 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.0 // indirect @@ -150,7 +150,7 @@ require ( github.com/emirpasic/gods v1.18.1 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fvbommel/sortorder v1.1.0 // indirect github.com/go-errors/errors v1.4.2 // indirect @@ -222,7 +222,7 @@ require ( github.com/pkg/term v1.1.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pulumi/appdash v0.0.0-20231130102222-75f619a67231 // indirect - github.com/pulumi/esc v0.9.1 // indirect + github.com/pulumi/esc v0.10.0 // indirect github.com/pulumi/pulumi-command/sdk v1.0.1 // indirect github.com/pulumi/pulumi-docker/sdk/v4 v4.5.5 // indirect github.com/pulumi/pulumi-libvirt/sdk v0.4.7 // indirect @@ -251,7 +251,7 @@ require ( github.com/xlab/treeprint v1.2.0 // indirect github.com/zclconf/go-cty v1.14.4 // indirect github.com/zorkian/go-datadog-api v2.30.0+incompatible - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.31.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 // indirect go.opentelemetry.io/otel/metric v1.31.0 // indirect @@ -324,11 +324,11 @@ require ( github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.2.0 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.60.0 // indirect + github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.67.0 // indirect github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0 // indirect - github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.59.0 // indirect - github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.59.0 // indirect - github.com/pulumi/pulumi-azure-native-sdk/v2 v2.60.0 // indirect + github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.67.0 // indirect + github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.67.0 // indirect + github.com/pulumi/pulumi-azure-native-sdk/v2 
v2.67.0 // indirect github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1 // indirect github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0 // indirect github.com/shirou/gopsutil/v3 v3.24.4 // indirect diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index 28321f6b27f51..dc7d8894abb87 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -16,8 +16,8 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/test-infra-definitions v0.0.0-20241015101741-f694c4dc33e4 h1:9P0Ecgmk+so7agQVftk6ojEs7qlquhGakJ1BMh3jZ7A= -github.com/DataDog/test-infra-definitions v0.0.0-20241015101741-f694c4dc33e4/go.mod h1:0YN66dG8119K5c1QDt11MUwYxov6Yd0JX9+kmj45yQE= +github.com/DataDog/test-infra-definitions v0.0.0-20241023110344-cd9a362371a8 h1:+5OKFDTd5xx+FgMbpceZn59uyvtxARoju+3sGokcLSQ= +github.com/DataDog/test-infra-definitions v0.0.0-20241023110344-cd9a362371a8/go.mod h1:l0n0FQYdWWQxbI5a2EkuynRQIteUQcYOaOhdxD9TvJs= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= @@ -57,8 +57,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= -github.com/aws/aws-sdk-go-v2 v1.32.0 h1:GuHp7GvMN74PXD5C97KT5D87UhIy4bQPkflQKbfkndg= -github.com/aws/aws-sdk-go-v2 v1.32.0/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI= +github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 h1:pT3hpW0cOHRJx8Y0DfJUEQuqPild8jRGmSFmBgvydr0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6/go.mod h1:j/I2++U0xX+cr44QjHay4Cvxj6FUbnxrgmqN3H1jTZA= github.com/aws/aws-sdk-go-v2/config v1.27.40 h1:sie4mPBGFOO+Z27+yHzvyN31G20h/bf2xb5mCbpLv2Q= @@ -67,20 +67,20 @@ github.com/aws/aws-sdk-go-v2/credentials v1.17.38 h1:iM90eRhCeZtlkzCNCG1JysOzJXG github.com/aws/aws-sdk-go-v2/credentials v1.17.38/go.mod h1:TCVYPZeQuLaYNEkf/TVn6k5k/zdVZZ7xH9po548VNNg= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 h1:C/d03NAmh8C4BZXhuRNboF/DqhBkBCeDiJDcaqIT5pA= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14/go.mod h1:7I0Ju7p9mCIdlrfS+JCgqcYD0VXz/N4yozsox+0o078= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.19 h1:Q/k5wCeJkSWs+62kDfOillkNIJ5NqmE3iOfm48g/W8c= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.19/go.mod h1:Wns1C66VvtA2Bv/cUBuKZKQKdjo7EVMhp90aAa+8oTI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.19 h1:AYLE0lUfKvN6icFTR/p+NmD1amYKTbqHQ1Nm+jwE6BM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.19/go.mod h1:1giLakj64GjuH1NBzF/DXqly5DWHtMTaOzRZ53nFX0I= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk= 
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.19 h1:FKdiFzTxlTRO71p0C7VrLbkkdW8qfMKF5+ej6bTmkT0= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.19/go.mod h1:abO3pCj7WLQPTllnSeYImqFfkGrmJV0JovWo/gqT5N0= github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2 h1:Rts0EZgdi3tneJMXp+uKrZHbMxQIu0y5O/2MG6a2+hY= github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2/go.mod h1:j0V2ahvdX3mGIyXQSe9vjdIQvSxz3uaMM0bR7Y+0WCE= -github.com/aws/aws-sdk-go-v2/service/ecr v1.32.4 h1:nQAU2Yr+afkAvIV39mg7LrNYFNQP7ShwbmiJqx2fUKA= -github.com/aws/aws-sdk-go-v2/service/ecr v1.32.4/go.mod h1:keOS9j4fv5ASh7dV29lIpGw2QgoJwGFAyMU0uPvfax4= -github.com/aws/aws-sdk-go-v2/service/ecs v1.45.2 h1:DSFxt4HBQjlgKNMyYdME9cbB11FFi7umpTGbqJaS9nw= -github.com/aws/aws-sdk-go-v2/service/ecs v1.45.2/go.mod h1:er8WHbgZAl17Dmu41ifKmUrV7JPpiQnRc+XSrnu4qR8= +github.com/aws/aws-sdk-go-v2/service/ecr v1.36.2 h1:VDQaVwGOokbd3VUbHF+wupiffdrbAZPdQnr5XZMJqrs= +github.com/aws/aws-sdk-go-v2/service/ecr v1.36.2/go.mod h1:lvUlMghKYmSxSfv0vU7pdU/8jSY+s0zpG8xXhaGKCw0= +github.com/aws/aws-sdk-go-v2/service/ecs v1.47.4 h1:CTkPGE8fiElvLtYWl/U+Eu5+1fVXiZbJUjyVCRSRgxk= +github.com/aws/aws-sdk-go-v2/service/ecs v1.47.4/go.mod h1:sMFLFhL27cKYa/eQYZp4asvIwHsnJWrAzTUpy9AQdnU= github.com/aws/aws-sdk-go-v2/service/eks v1.44.1 h1:onUAzZXDsyXzyrmOGw/9p8Csl1NZkTDEs4URZ8covUY= github.com/aws/aws-sdk-go-v2/service/eks v1.44.1/go.mod h1:dg9l/W4hXygeRNydRB4LWKY/MwHJhfUomGJUBwI29Dw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g= @@ -197,8 +197,8 @@ github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZM github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -320,7 +320,6 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru/v2 
v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= @@ -500,22 +499,22 @@ github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4V github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/pulumi/appdash v0.0.0-20231130102222-75f619a67231 h1:vkHw5I/plNdTr435cARxCW6q9gc0S/Yxz7Mkd38pOb0= github.com/pulumi/appdash v0.0.0-20231130102222-75f619a67231/go.mod h1:murToZ2N9hNJzewjHBgfFdXhZKjY3z5cYC1VXk+lbFE= -github.com/pulumi/esc v0.9.1 h1:HH5eEv8sgyxSpY5a8yePyqFXzA8cvBvapfH8457+mIs= -github.com/pulumi/esc v0.9.1/go.mod h1:oEJ6bOsjYlQUpjf70GiX+CXn3VBmpwFDxUTlmtUN84c= -github.com/pulumi/pulumi-aws/sdk/v6 v6.54.2 h1:nOa8VQ06HHSI02X5LeVH95lKH5bKe9eQ/4uvuuJgS1s= -github.com/pulumi/pulumi-aws/sdk/v6 v6.54.2/go.mod h1:HWyVOgw2WogCRYxH6eRSKM7fNK+vHXxPKqrbx/oy0wI= -github.com/pulumi/pulumi-awsx/sdk/v2 v2.14.0 h1:GknlrxIweg8X65VcxJaUVdZIHhclZjdzEWxsLGnMR2Y= -github.com/pulumi/pulumi-awsx/sdk/v2 v2.14.0/go.mod h1:mB6jxy6GjMd1dmTA129GkHH5pyryYG/W0J1X2XznxW4= -github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.60.0 h1:qCpKZQECnZWXVMWfuTk6nfPfQoP+7zXPS5bHdeIh5Mc= -github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.60.0/go.mod h1:ILyyA8nuYMWOcU7sRqRVmakNeY4hxog7K4nMCL+IOjE= +github.com/pulumi/esc v0.10.0 h1:jzBKzkLVW0mePeanDRfqSQoCJ5yrkux0jIwAkUxpRKE= +github.com/pulumi/esc v0.10.0/go.mod h1:2Bfa+FWj/xl8CKqRTWbWgDX0SOD4opdQgvYSURTGK2c= +github.com/pulumi/pulumi-aws/sdk/v6 v6.56.1 h1:wA38Ep4sEphX+3YGwFfaxRHs7NQv8dNObFepX6jaRa4= +github.com/pulumi/pulumi-aws/sdk/v6 v6.56.1/go.mod h1:m/ejZ2INurqq/ncDjJfgC1Ff/lnbt0J/uO33BnPVots= +github.com/pulumi/pulumi-awsx/sdk/v2 v2.16.1 h1:6082hB+ILpPB/0V5F+LTmHbX1BO54tCVOQCVOL/FYI4= +github.com/pulumi/pulumi-awsx/sdk/v2 v2.16.1/go.mod h1:z2bnBPHNYfk72IW1P01H9qikBtBSBhCwi3QpH6Y/38Q= +github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.67.0 h1:mgmmbFEoc1YOu81K9Bl/MVWE8cGloEdiCeIw394vXcM= +github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.67.0/go.mod h1:WmvulRFoc+dOk/el9y6u7z3CvA+yljL8HJXajmvZTYo= github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0 h1:MFOd6X9FPlixzriy14fBHv7pFCCh/mu1pwHtSSjqfJ4= github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0/go.mod h1:453Ff5wNscroYfq+zxME7Nbt7HdZv+dh0zLZwLyGBws= -github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.59.0 h1:ijcCyi+SPlJn3aIEb4p23FTk6fxjPLtVMhfkRaKp85A= -github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.59.0/go.mod h1:yQXpYXNeGVBcygd5Be/fzf+1Jcg4kDLAMZY6UDtIZvQ= -github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.59.0 h1:mqs2dlpcyYn2LsA20bC8xN30YaVs7x8M6tC7BtDiY64= -github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.59.0/go.mod h1:OTv2GUMWUktcvdjXFRaAdJDW1f/SuRSCKHdVCcQrN7U= -github.com/pulumi/pulumi-azure-native-sdk/v2 v2.60.0 h1:Q+we+HFtnNGkeXIhdWIKJZWJRwaIBUuMcZKG70YIYyw= -github.com/pulumi/pulumi-azure-native-sdk/v2 v2.60.0/go.mod h1:guTN5l9syK6v4+94APSi9np3rj1JPrPUEOG+B0dDaZE= +github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.67.0 h1:jvruQQSO1ESk7APFQ3mAge7C9SWKU9nbBHrilcyeSGU= +github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.67.0/go.mod h1:d5nmekK1mrjM9Xo/JGGVlAs7mqqftBo3DmKji+1zbmw= +github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.67.0 h1:r26Xl6FdOJnbLs1ny9ekuRjFxAocZK8jS8SLrgXKEFE= +github.com/pulumi/pulumi-azure-native-sdk/network/v2 
v2.67.0/go.mod h1:8yXZtmHe2Zet5pb8gZ7D730d0VAm4kYUdwCj7sjhz6g= +github.com/pulumi/pulumi-azure-native-sdk/v2 v2.67.0 h1:FgfXLypiQ/DKWRPQpyNaftXcGl5HVgA93msBZTQ6Ddk= +github.com/pulumi/pulumi-azure-native-sdk/v2 v2.67.0/go.mod h1:0y4wJUCX1eA3ZSn0jJIRXtHeJA7qgbPfkrR9qvj+5D4= github.com/pulumi/pulumi-command/sdk v1.0.1 h1:ZuBSFT57nxg/fs8yBymUhKLkjJ6qmyN3gNvlY/idiN0= github.com/pulumi/pulumi-command/sdk v1.0.1/go.mod h1:C7sfdFbUIoXKoIASfXUbP/U9xnwPfxvz8dBpFodohlA= github.com/pulumi/pulumi-docker/sdk/v4 v4.5.5 h1:7OjAfgLz5PAy95ynbgPAlWls5WBe4I/QW/61TdPWRlQ= @@ -534,8 +533,8 @@ github.com/pulumi/pulumi-random/sdk/v4 v4.16.6 h1:M9BSF13bQxj74C61nBTVITrsgT6oRR github.com/pulumi/pulumi-random/sdk/v4 v4.16.6/go.mod h1:l5ew7S/G1GspPLH9KeWXqxQ4ZmS2hh2sEMv3bW9M3yc= github.com/pulumi/pulumi-tls/sdk/v4 v4.11.1 h1:tXemWrzeVTqG8zq6hBdv1TdPFXjgZ+dob63a/6GlF1o= github.com/pulumi/pulumi-tls/sdk/v4 v4.11.1/go.mod h1:hODo3iEmmXDFOXqPK+V+vwI0a3Ww7BLjs5Tgamp86Ng= -github.com/pulumi/pulumi/sdk/v3 v3.133.0 h1:o+7dbJZY9BVgAjOF5GYIWgjp/zpKAgWZwD4pPjUMXKQ= -github.com/pulumi/pulumi/sdk/v3 v3.133.0/go.mod h1:J5kQEX8v87aeUhk6NdQXnjCo1DbiOnOiL3Sf2DuDda8= +github.com/pulumi/pulumi/sdk/v3 v3.137.0 h1:bxhYpOY7Z4xt+VmezEpHuhjpOekkaMqOjzxFg/1OhCw= +github.com/pulumi/pulumi/sdk/v3 v3.137.0/go.mod h1:PvKsX88co8XuwuPdzolMvew5lZV+4JmZfkeSjj7A6dI= github.com/pulumiverse/pulumi-time/sdk v0.1.0 h1:xfi9HKDgV+GgDxQ23oSv9KxC3DQqViGTcMrJICRgJv0= github.com/pulumiverse/pulumi-time/sdk v0.1.0/go.mod h1:NUa1zA74DF002WrM6iF111A6UjX9knPpXufVRvBwNyg= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= @@ -654,8 +653,8 @@ github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgr github.com/zorkian/go-datadog-api v2.30.0+incompatible h1:R4ryGocppDqZZbnNc5EDR8xGWF/z/MxzWnqTUijDQes= github.com/zorkian/go-datadog-api v2.30.0+incompatible/go.mod h1:PkXwHX9CUQa/FpB9ZwAD45N1uhCW4MT/Wj7m36PbKss= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= @@ -859,7 +858,7 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= +google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s= google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 
h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= diff --git a/test/new-e2e/pkg/environments/aws/kubernetes/eks.go b/test/new-e2e/pkg/environments/aws/kubernetes/eks.go index 6e3f5e6070577..fe5bac77b5748 100644 --- a/test/new-e2e/pkg/environments/aws/kubernetes/eks.go +++ b/test/new-e2e/pkg/environments/aws/kubernetes/eks.go @@ -10,30 +10,19 @@ import ( "context" "fmt" - "github.com/DataDog/test-infra-definitions/common/config" "github.com/DataDog/test-infra-definitions/common/utils" - "github.com/DataDog/test-infra-definitions/components" "github.com/DataDog/test-infra-definitions/components/datadog/agent/helm" dogstatsdstandalone "github.com/DataDog/test-infra-definitions/components/datadog/dogstatsd-standalone" fakeintakeComp "github.com/DataDog/test-infra-definitions/components/datadog/fakeintake" "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams" - kubeComp "github.com/DataDog/test-infra-definitions/components/kubernetes" "github.com/DataDog/test-infra-definitions/resources/aws" - localEks "github.com/DataDog/test-infra-definitions/resources/aws/eks" + "github.com/DataDog/test-infra-definitions/scenarios/aws/eks" "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional" - "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2" - awsEks "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/eks" - awsIam "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam" - "github.com/pulumi/pulumi-eks/sdk/v2/go/eks" - "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes" - appsv1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/apps/v1" - corev1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1" - metav1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/meta/v1" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) @@ -79,296 +68,68 @@ func EKSRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Provi } } - clusterComp, err := components.NewComponent(&awsEnv, awsEnv.Namer.ResourceName("eks"), func(comp *kubeComp.Cluster) error { - // Create Cluster SG - clusterSG, err := ec2.NewSecurityGroup(ctx, awsEnv.Namer.ResourceName("eks-sg"), &ec2.SecurityGroupArgs{ - NamePrefix: awsEnv.CommonNamer().DisplayName(255, pulumi.String("eks-sg")), - Description: pulumi.StringPtr("EKS Cluster sg for stack: " + ctx.Stack()), - Ingress: ec2.SecurityGroupIngressArray{ - ec2.SecurityGroupIngressArgs{ - SecurityGroups: pulumi.ToStringArray(awsEnv.EKSAllowedInboundSecurityGroups()), - PrefixListIds: pulumi.ToStringArray(awsEnv.EKSAllowedInboundPrefixLists()), - ToPort: pulumi.Int(22), - FromPort: pulumi.Int(22), - Protocol: pulumi.String("tcp"), - }, - ec2.SecurityGroupIngressArgs{ - SecurityGroups: pulumi.ToStringArray(awsEnv.EKSAllowedInboundSecurityGroups()), - PrefixListIds: pulumi.ToStringArray(awsEnv.EKSAllowedInboundPrefixLists()), - ToPort: pulumi.Int(443), - FromPort: pulumi.Int(443), - Protocol: pulumi.String("tcp"), - }, - }, - VpcId: pulumi.StringPtr(awsEnv.DefaultVPCID()), - }, awsEnv.WithProviders(config.ProviderAWS)) - if err != nil { - return err - } + cluster, err := eks.NewCluster(awsEnv, params.name, 
params.eksOptions...) + if err != nil { + return err + } - // Cluster role - clusterRole, err := localEks.GetClusterRole(awsEnv, "eks-cluster-role") - if err != nil { - return err - } + if err := cluster.Export(ctx, &env.KubernetesCluster.ClusterOutput); err != nil { + return err + } - // IAM Node role - linuxNodeRole, err := localEks.GetNodeRole(awsEnv, "eks-linux-node-role") - if err != nil { - return err - } + if awsEnv.InitOnly() { + return nil + } - windowsNodeRole, err := localEks.GetNodeRole(awsEnv, "eks-windows-node-role") - if err != nil { - return err + var fakeIntake *fakeintakeComp.Fakeintake + if params.fakeintakeOptions != nil { + fakeIntakeOptions := []fakeintake.Option{ + fakeintake.WithCPU(1024), + fakeintake.WithMemory(6144), } - - // Fargate Configuration - var fargateProfile pulumi.Input - if fargateNamespace := awsEnv.EKSFargateNamespace(); fargateNamespace != "" { - fargateProfile = pulumi.Any( - eks.FargateProfile{ - Selectors: []awsEks.FargateProfileSelector{ - { - Namespace: fargateNamespace, - }, - }, - }, - ) + if awsEnv.GetCommonEnvironment().InfraShouldDeployFakeintakeWithLB() { + fakeIntakeOptions = append(fakeIntakeOptions, fakeintake.WithLoadBalancer()) } - // Create an EKS cluster with the default configuration. - cluster, err := eks.NewCluster(ctx, awsEnv.Namer.ResourceName("eks"), &eks.ClusterArgs{ - Name: awsEnv.CommonNamer().DisplayName(100), - Version: pulumi.StringPtr(awsEnv.KubernetesVersion()), - EndpointPrivateAccess: pulumi.BoolPtr(true), - EndpointPublicAccess: pulumi.BoolPtr(false), - Fargate: fargateProfile, - ClusterSecurityGroup: clusterSG, - NodeAssociatePublicIpAddress: pulumi.BoolRef(false), - PrivateSubnetIds: awsEnv.RandomSubnets(), - VpcId: pulumi.StringPtr(awsEnv.DefaultVPCID()), - SkipDefaultNodeGroup: pulumi.BoolRef(true), - // The content of the aws-auth map is the merge of `InstanceRoles` and `RoleMappings`. - // For managed node groups, we push the value in `InstanceRoles`. - // For unmanaged node groups, we push the value in `RoleMappings` - RoleMappings: eks.RoleMappingArray{ - eks.RoleMappingArgs{ - Groups: pulumi.ToStringArray([]string{"system:bootstrappers", "system:nodes", "eks:kube-proxy-windows"}), - Username: pulumi.String("system:node:{{EC2PrivateDNSName}}"), - RoleArn: windowsNodeRole.Arn, - }, - }, - InstanceRoles: awsIam.RoleArray{ - linuxNodeRole, - }, - ServiceRole: clusterRole, - ProviderCredentialOpts: &eks.KubeconfigOptionsArgs{ - ProfileName: pulumi.String(awsEnv.Profile()), - }, - }, pulumi.Timeouts(&pulumi.CustomTimeouts{ - Create: "30m", - Update: "30m", - Delete: "30m", - }), awsEnv.WithProviders(config.ProviderEKS, config.ProviderAWS)) - if err != nil { + if fakeIntake, err = fakeintake.NewECSFargateInstance(awsEnv, "ecs", fakeIntakeOptions...); err != nil { return err } - - if awsEnv.InitOnly() { - return nil + if err := fakeIntake.Export(awsEnv.Ctx(), &env.FakeIntake.FakeintakeOutput); err != nil { + return err } + } else { + env.FakeIntake = nil + } - kubeConfig, err := cluster.GetKubeconfig(ctx, &eks.ClusterGetKubeconfigArgs{ - ProfileName: pulumi.String(awsEnv.Profile()), - }) - + // Deploy the agent + dependsOnSetup := utils.PulumiDependsOn(cluster) + if params.agentOptions != nil { + params.agentOptions = append(params.agentOptions, kubernetesagentparams.WithPulumiResourceOptions(dependsOnSetup), kubernetesagentparams.WithFakeintake(fakeIntake)) + kubernetesAgent, err := helm.NewKubernetesAgent(&awsEnv, "eks", cluster.KubeProvider, params.agentOptions...) 
if err != nil { return err } - - // Building Kubernetes provider - eksKubeProvider, err := kubernetes.NewProvider(awsEnv.Ctx(), awsEnv.Namer.ResourceName("k8s-provider"), &kubernetes.ProviderArgs{ - Kubeconfig: kubeConfig, - EnableServerSideApply: pulumi.BoolPtr(true), - DeleteUnreachable: pulumi.BoolPtr(true), - }, awsEnv.WithProviders(config.ProviderAWS)) + err = kubernetesAgent.Export(ctx, &env.Agent.KubernetesAgentOutput) if err != nil { return err } + } else { + env.Agent = nil + } - // Filling Kubernetes component from EKS cluster - comp.ClusterName = cluster.EksCluster.Name() - comp.KubeConfig = kubeConfig - comp.KubeProvider = eksKubeProvider - - // Create configuration for POD subnets if any - workloadDeps := make([]pulumi.Resource, 0) - if podSubnets := awsEnv.EKSPODSubnets(); len(podSubnets) > 0 { - eniConfigs, err := localEks.NewENIConfigs(awsEnv, podSubnets, awsEnv.DefaultSecurityGroups(), pulumi.Provider(eksKubeProvider)) - if err != nil { - return err - } - - // Setting AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG is mandatory for EKS CNI to work with ENIConfig CRD - dsPatch, err := appsv1.NewDaemonSetPatch(awsEnv.Ctx(), awsEnv.Namer.ResourceName("eks-custom-network"), &appsv1.DaemonSetPatchArgs{ - Metadata: metav1.ObjectMetaPatchArgs{ - Namespace: pulumi.String("kube-system"), - Name: pulumi.String("aws-node"), - Annotations: pulumi.StringMap{ - "pulumi.com/patchForce": pulumi.String("true"), - }, - }, - Spec: appsv1.DaemonSetSpecPatchArgs{ - Template: corev1.PodTemplateSpecPatchArgs{ - Spec: corev1.PodSpecPatchArgs{ - Containers: corev1.ContainerPatchArray{ - corev1.ContainerPatchArgs{ - Name: pulumi.StringPtr("aws-node"), - Env: corev1.EnvVarPatchArray{ - corev1.EnvVarPatchArgs{ - Name: pulumi.String("AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG"), - Value: pulumi.String("true"), - }, - corev1.EnvVarPatchArgs{ - Name: pulumi.String("ENI_CONFIG_LABEL_DEF"), - Value: pulumi.String("topology.kubernetes.io/zone"), - }, - corev1.EnvVarPatchArgs{ - Name: pulumi.String("ENABLE_PREFIX_DELEGATION"), - Value: pulumi.String("true"), - }, - corev1.EnvVarPatchArgs{ - Name: pulumi.String("WARM_IP_TARGET"), - Value: pulumi.String("1"), - }, - corev1.EnvVarPatchArgs{ - Name: pulumi.String("MINIMUM_IP_TARGET"), - Value: pulumi.String("1"), - }, - }, - }, - }, - }, - }, - }, - }, pulumi.Provider(eksKubeProvider), utils.PulumiDependsOn(eniConfigs)) - if err != nil { - return err - } - - workloadDeps = append(workloadDeps, eniConfigs, dsPatch) - } - - // Create managed node groups - if params.eksLinuxNodeGroup { - ng, err := localEks.NewLinuxNodeGroup(awsEnv, cluster, linuxNodeRole) - if err != nil { - return err - } - workloadDeps = append(workloadDeps, ng) - } - - if params.eksLinuxARMNodeGroup { - ng, err := localEks.NewLinuxARMNodeGroup(awsEnv, cluster, linuxNodeRole) - if err != nil { - return err - } - workloadDeps = append(workloadDeps, ng) - } - - if params.eksBottlerocketNodeGroup { - ng, err := localEks.NewBottlerocketNodeGroup(awsEnv, cluster, linuxNodeRole) - if err != nil { - return err - } - workloadDeps = append(workloadDeps, ng) - } - - // Create unmanaged node groups - if params.eksWindowsNodeGroup { - _, err := localEks.NewWindowsNodeGroup(awsEnv, cluster, windowsNodeRole) - if err != nil { - return err - } - } - - // Applying necessary Windows configuration if Windows nodes - // Custom networking is not available for Windows nodes, using normal subnets IPs - if params.eksWindowsNodeGroup { - _, err := corev1.NewConfigMapPatch(awsEnv.Ctx(), awsEnv.Namer.ResourceName("eks-cni-cm"), 
&corev1.ConfigMapPatchArgs{ - Metadata: metav1.ObjectMetaPatchArgs{ - Namespace: pulumi.String("kube-system"), - Name: pulumi.String("amazon-vpc-cni"), - Annotations: pulumi.StringMap{ - "pulumi.com/patchForce": pulumi.String("true"), - }, - }, - Data: pulumi.StringMap{ - "enable-windows-ipam": pulumi.String("true"), - }, - }, pulumi.Provider(eksKubeProvider)) - if err != nil { - return err - } - } - - var fakeIntake *fakeintakeComp.Fakeintake - if params.fakeintakeOptions != nil { - fakeIntakeOptions := []fakeintake.Option{ - fakeintake.WithCPU(1024), - fakeintake.WithMemory(6144), - } - if awsEnv.GetCommonEnvironment().InfraShouldDeployFakeintakeWithLB() { - fakeIntakeOptions = append(fakeIntakeOptions, fakeintake.WithLoadBalancer()) - } - - if fakeIntake, err = fakeintake.NewECSFargateInstance(awsEnv, "ecs", fakeIntakeOptions...); err != nil { - return err - } - if err := fakeIntake.Export(awsEnv.Ctx(), &env.FakeIntake.FakeintakeOutput); err != nil { - return err - } - } else { - env.FakeIntake = nil - } - - // Deploy the agent - dependsOnSetup := utils.PulumiDependsOn(workloadDeps...) - if params.agentOptions != nil { - params.agentOptions = append(params.agentOptions, kubernetesagentparams.WithPulumiResourceOptions(dependsOnSetup), kubernetesagentparams.WithFakeintake(fakeIntake)) - kubernetesAgent, err := helm.NewKubernetesAgent(&awsEnv, "eks", eksKubeProvider, params.agentOptions...) - if err != nil { - return err - } - err = kubernetesAgent.Export(ctx, &env.Agent.KubernetesAgentOutput) - if err != nil { - return err - } - } else { - env.Agent = nil - } - - // Deploy standalone dogstatsd - if params.deployDogstatsd { - if _, err := dogstatsdstandalone.K8sAppDefinition(&awsEnv, eksKubeProvider, "dogstatsd-standalone", fakeIntake, true, ""); err != nil { - return err - } + // Deploy standalone dogstatsd + if params.deployDogstatsd { + if _, err := dogstatsdstandalone.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "dogstatsd-standalone", fakeIntake, true, ""); err != nil { + return err } + } - // Deploy workloads - for _, appFunc := range params.workloadAppFuncs { - _, err := appFunc(&awsEnv, eksKubeProvider) - if err != nil { - return err - } + // Deploy workloads + for _, appFunc := range params.workloadAppFuncs { + _, err := appFunc(&awsEnv, cluster.KubeProvider) + if err != nil { + return err } - - return nil - }) - if err != nil { - return err } - - return clusterComp.Export(ctx, &env.KubernetesCluster.ClusterOutput) + return nil } diff --git a/test/new-e2e/pkg/environments/aws/kubernetes/params.go b/test/new-e2e/pkg/environments/aws/kubernetes/params.go index 1084e6f41bc32..abc939d523bfb 100644 --- a/test/new-e2e/pkg/environments/aws/kubernetes/params.go +++ b/test/new-e2e/pkg/environments/aws/kubernetes/params.go @@ -17,6 +17,7 @@ import ( kubeComp "github.com/DataDog/test-infra-definitions/components/kubernetes" "github.com/DataDog/test-infra-definitions/resources/aws" "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" + "github.com/DataDog/test-infra-definitions/scenarios/aws/eks" "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake" "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes" @@ -28,6 +29,7 @@ type ProvisionerParams struct { vmOptions []ec2.VMOption agentOptions []kubernetesagentparams.Option fakeintakeOptions []fakeintake.Option + eksOptions []eks.Option extraConfigParams runner.ConfigMap workloadAppFuncs []WorkloadAppFunc @@ -45,6 +47,7 @@ func newProvisionerParams() *ProvisionerParams { vmOptions: []ec2.VMOption{}, agentOptions: 
[]kubernetesagentparams.Option{}, fakeintakeOptions: []fakeintake.Option{}, + eksOptions: []eks.Option{}, extraConfigParams: runner.ConfigMap{}, workloadAppFuncs: []WorkloadAppFunc{}, @@ -101,34 +104,10 @@ func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption { } } -// WithEKSLinuxNodeGroup enable Linux node group -func WithEKSLinuxNodeGroup() ProvisionerOption { +// WithEKSOptions adds options to the EKS cluster +func WithEKSOptions(opts ...eks.Option) ProvisionerOption { return func(params *ProvisionerParams) error { - params.eksLinuxNodeGroup = true - return nil - } -} - -// WithEKSLinuxARMNodeGroup enable ARM node group -func WithEKSLinuxARMNodeGroup() ProvisionerOption { - return func(params *ProvisionerParams) error { - params.eksLinuxARMNodeGroup = true - return nil - } -} - -// WithEKSBottlerocketNodeGroup enable AWS Bottle rocket node group -func WithEKSBottlerocketNodeGroup() ProvisionerOption { - return func(params *ProvisionerParams) error { - params.eksBottlerocketNodeGroup = true - return nil - } -} - -// WithEKSWindowsNodeGroup enable Windows node group -func WithEKSWindowsNodeGroup() ProvisionerOption { - return func(params *ProvisionerParams) error { - params.eksWindowsNodeGroup = true + params.eksOptions = opts return nil } } diff --git a/test/new-e2e/pkg/utils/infra/retriable_errors.go b/test/new-e2e/pkg/utils/infra/retriable_errors.go index 7d28f17006460..1c2ee5f3b1812 100644 --- a/test/new-e2e/pkg/utils/infra/retriable_errors.go +++ b/test/new-e2e/pkg/utils/infra/retriable_errors.go @@ -50,5 +50,9 @@ func getKnownErrors() []knownError { errorMessage: `error while waiting for fakeintake`, retryType: ReCreate, }, + { + errorMessage: `ssh: handshake failed: ssh: unable to authenticate`, + retryType: ReCreate, + }, } } diff --git a/test/new-e2e/system-probe/test-runner/files/no_usm.json b/test/new-e2e/system-probe/test-runner/files/no_usm.json index 60dcef477454a..e6335a18db4dd 100644 --- a/test/new-e2e/system-probe/test-runner/files/no_usm.json +++ b/test/new-e2e/system-probe/test-runner/files/no_usm.json @@ -3,6 +3,9 @@ "pkg/network/usm": { "exclude": true }, + "pkg/network/usm/tests": { + "exclude": true + }, "*": { "exclude": false } diff --git a/test/new-e2e/system-probe/test-runner/files/only_usm.json b/test/new-e2e/system-probe/test-runner/files/only_usm.json index d63d56fbd9617..1b9c7766e5e64 100644 --- a/test/new-e2e/system-probe/test-runner/files/only_usm.json +++ b/test/new-e2e/system-probe/test-runner/files/only_usm.json @@ -2,6 +2,9 @@ "filters": { "pkg/network/usm": { "exclude": false + }, + "pkg/network/usm/tests": { + "exclude": false } } } diff --git a/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go b/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go index 621164a59d577..49ea808f1fc88 100644 --- a/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go +++ b/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go @@ -87,7 +87,7 @@ func (s *LinuxJournaldFakeintakeSuite) journaldLogCollection() { // Restart agent and make sure it's ready before adding logs _, err = s.Env().RemoteHost.Execute("sudo systemctl restart datadog-agent") assert.NoErrorf(t, err, "Failed to restart the agent: %s", err) - s.EventuallyWithT(func(_ *assert.CollectT) { + s.EventuallyWithT(func(t *assert.CollectT) { agentReady := s.Env().Agent.Client.IsReady() assert.True(t, agentReady) }, utils.WaitFor, 
eventuallyWithTickDuration, "Agent was not ready") diff --git a/test/new-e2e/tests/agent-platform/platforms/platforms.json b/test/new-e2e/tests/agent-platform/platforms/platforms.json index a0aee09367140..6af9b65dafb02 100644 --- a/test/new-e2e/tests/agent-platform/platforms/platforms.json +++ b/test/new-e2e/tests/agent-platform/platforms/platforms.json @@ -3,12 +3,12 @@ "x86_64": { "debian-9": "ami-0182559468c1975fe", "debian-10": "ami-067a196d70cb53732", - "debian-11": "ami-0607e701db389efe7", - "debian-12": "ami-07edaec601cf2b6d3" + "debian-11": "ami-0698acab5370075a9", + "debian-12": "ami-0eef9d92ec044bc94" }, "arm64": { "debian-10": "ami-0b6ee4b8f4aa91fb4", - "debian-11": "ami-00988b9ead6afb0b1", + "debian-11": "ami-0eec63b0513577808", "debian-12": "ami-02aab8d5301cb8d68" } }, @@ -63,8 +63,8 @@ }, "suse": { "x86_64": { - "sles-12": "ami-08d21b039336d9351", - "sles-15": "ami-08f3662e2d5b3989a" + "sles-12": "ami-058c3f61e5f37c43f", + "sles-15": "ami-067dfda331f8296b0" }, "arm64": { "sles-15": "ami-0d446ba26bbe19573" diff --git a/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go b/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go index fcec9ed915b38..01ba9d04182a5 100644 --- a/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go +++ b/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go @@ -262,8 +262,8 @@ func (is *stepByStepSuite) StepByStepSuseTest(VMclient *common.TestClient) { var err error // Disable all existing non-datadog repos to avoid issues during refresh (which is hard to prevent zypper from doing spontaneously); - // we don't need them to install the Agent anyway - ExecuteWithoutError(nil, VMclient, "sudo rm /etc/zypp/repos.d/*.repo") + // we don't need them to install the Agent anyway. 
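+	// ("rm -f" keeps this step from failing when no .repo files are present)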
+ ExecuteWithoutError(nil, VMclient, "sudo rm -f /etc/zypp/repos.d/*.repo") fileContent := fmt.Sprintf("[datadog]\n"+ "name = Datadog, Inc.\n"+ diff --git a/test/new-e2e/tests/containers/ecs_test.go b/test/new-e2e/tests/containers/ecs_test.go index 42d3f6a40c7e1..ab48a7fbb367e 100644 --- a/test/new-e2e/tests/containers/ecs_test.go +++ b/test/new-e2e/tests/containers/ecs_test.go @@ -78,7 +78,7 @@ func (suite *ecsSuite) SetupSuite() { suite.Require().NoError(fakeintake.Init(suite)) suite.Fakeintake = fakeintake.Client() - clusterSerialized, err := json.Marshal(stackOutput.Outputs["dd-Cluster-ecs-cluster"].Value) + clusterSerialized, err := json.Marshal(stackOutput.Outputs["dd-Cluster-ecs"].Value) suite.Require().NoError(err) ecsCluster := &ecsComp.ClusterOutput{} suite.Require().NoError(ecsCluster.Import(clusterSerialized, ecsCluster)) diff --git a/test/new-e2e/tests/containers/k8s_test.go b/test/new-e2e/tests/containers/k8s_test.go index 2badd72b23401..0da696934b35f 100644 --- a/test/new-e2e/tests/containers/k8s_test.go +++ b/test/new-e2e/tests/containers/k8s_test.go @@ -192,6 +192,25 @@ func (suite *k8sSuite) testUpAndRunning(waitFor time.Duration) { }) } +func (suite *k8sSuite) TestAdmissionControllerWebhooksExist() { + ctx := context.Background() + expectedWebhookName := "datadog-webhook" + + suite.Run("agent registered mutating webhook configuration", func() { + mutatingConfigs, err := suite.K8sClient.AdmissionregistrationV1().MutatingWebhookConfigurations().List(ctx, metav1.ListOptions{}) + suite.Require().NoError(err) + suite.NotEmpty(mutatingConfigs.Items, "No mutating webhook configuration found") + found := false + for _, mutatingConfig := range mutatingConfigs.Items { + if mutatingConfig.Name == expectedWebhookName { + found = true + break + } + } + suite.Require().True(found, fmt.Sprintf("None of the mutating webhook configurations have the name '%s'", expectedWebhookName)) + }) +} + func (suite *k8sSuite) TestVersion() { ctx := context.Background() versionExtractor := regexp.MustCompile(`Commit: ([[:xdigit:]]+)`) diff --git a/test/new-e2e/tests/discovery/linux_test.go b/test/new-e2e/tests/discovery/linux_test.go index d48cc30962b7c..9238137beed99 100644 --- a/test/new-e2e/tests/discovery/linux_test.go +++ b/test/new-e2e/tests/discovery/linux_test.go @@ -33,7 +33,13 @@ type linuxTestSuite struct { e2e.BaseSuite[environments.Host] } -var services = []string{"python-svc", "python-instrumented", "node-json-server", "node-instrumented"} +var services = []string{ + "python-svc", + "python-instrumented", + "node-json-server", + "node-instrumented", + "rails-svc", +} func TestLinuxTestSuite(t *testing.T) { agentParams := []func(*agentparams.Params) error{ @@ -125,6 +131,15 @@ func (s *linuxTestSuite) TestServiceDiscoveryCheck() { assert.NotZero(c, found.Payload.RSSMemory) } + found = foundMap["rails_hello"] + if assert.NotNil(c, found) { + assert.Equal(c, "rails_hello", found.Payload.ServiceName) + assert.Equal(c, "rails_hello", found.Payload.GeneratedServiceName) + assert.Empty(c, found.Payload.DDService) + assert.Empty(c, found.Payload.ServiceNameSource) + assert.NotZero(c, found.Payload.RSSMemory) + } + assert.Contains(c, foundMap, "json-server") }, 3*time.Minute, 10*time.Second) } diff --git a/test/new-e2e/tests/discovery/testdata/provision/provision.sh b/test/new-e2e/tests/discovery/testdata/provision/provision.sh index 93bb7a0deeec1..852a21e3ca789 100755 --- a/test/new-e2e/tests/discovery/testdata/provision/provision.sh +++ 
b/test/new-e2e/tests/discovery/testdata/provision/provision.sh @@ -2,6 +2,47 @@ set -e +install_systemd_unit() { + while [[ $# -ge 2 ]]; do + case $1 in + --workdir) + shift + workdir="WorkingDirectory=$1" + shift + ;; + *) + break + ;; + esac + done + + name=$1 + command=$2 + port=$3 + extraenv=$4 + + cat > "/etc/systemd/system/${name}.service" <<- EOM +[Unit] +Description=${name} +After=network.target +StartLimitIntervalSec=0 + +[Service] +Type=simple +Restart=always +RestartSec=1 +User=root +ExecStart=${command} +Environment="PORT=${port}" +Environment="NODE_VERSION=20" +Environment="${extraenv}" +${workdir} + +[Install] +WantedBy=multi-user.target +EOM +} + apt-get update apt-get install -y \ ca-certificates \ @@ -28,43 +69,33 @@ nvm install 20 || nvm install 20 || nvm install 20 npm install json-server || npm install json-server npm install /home/ubuntu/e2e-test/node/instrumented -# Install our own services -install_systemd_unit () { - name=$1 - command=$2 - port=$3 - extraenv=$4 - - cat > "/etc/systemd/system/${name}.service" <<- EOM -[Unit] -Description=${name} -After=network.target -StartLimitIntervalSec=0 - -[Service] -Type=simple -Restart=always -RestartSec=1 -User=root -ExecStart=${command} -Environment="PORT=${port}" -Environment="NODE_VERSION=20" -Environment="${extraenv}" +# Install Ruby +## Packages +apt-get install -y \ + ruby \ + ruby-dev \ + ruby-rails \ + sqlite3 \ -[Install] -WantedBy=multi-user.target -EOM -} +## Create new Rails project +pushd /home/ubuntu +rails new rails-hello --minimal +bundle install --gemfile=/home/ubuntu/rails-hello/Gemfile +popd -# Node +# Install our own services +## Node install_systemd_unit "node-json-server" "$NVM_DIR/nvm-exec npx json-server --port 8084 /home/ubuntu/e2e-test/node/json-server/db.json" "8084" "" install_systemd_unit "node-instrumented" "$NVM_DIR/nvm-exec node /home/ubuntu/e2e-test/node/instrumented/server.js" "8085" "" -# Python +## Python install_systemd_unit "python-svc" "/usr/bin/python3 /home/ubuntu/e2e-test/python/server.py" "8082" "DD_SERVICE=python-svc-dd" install_systemd_unit "python-instrumented" "/usr/bin/python3 /home/ubuntu/e2e-test/python/instrumented.py" "8083" "" +## Ruby +install_systemd_unit --workdir "/home/ubuntu/rails-hello" "rails-svc" "rails server" "7777" "" + systemctl daemon-reload -# leave them stopped +## leave them stopped systemctl stop python-svc diff --git a/test/new-e2e/tests/gpu/gpu_test.go b/test/new-e2e/tests/gpu/gpu_test.go new file mode 100644 index 0000000000000..4b1680190d350 --- /dev/null +++ b/test/new-e2e/tests/gpu/gpu_test.go @@ -0,0 +1,106 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+
+package gpu
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"testing"
+
+	"github.com/DataDog/test-infra-definitions/scenarios/aws/ec2"
+
+	"github.com/DataDog/test-infra-definitions/components/datadog/agentparams"
+	"github.com/DataDog/test-infra-definitions/components/os"
+
+	"github.com/DataDog/datadog-agent/pkg/util/testutil/flake"
+	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+	awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host"
+	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclient"
+)
+
+var devMode = flag.Bool("devmode", false, "enable dev mode")
+
+type gpuSuite struct {
+	e2e.BaseSuite[environments.Host]
+}
+
+const defaultGpuCheckConfig = `
+init_config:
+  min_collection_interval: 5
+
+instances:
+  - {}
+`
+
+const defaultSysprobeConfig = `
+gpu_monitoring:
+  enabled: true
+`
+
+const vectorAddDockerImg = "nvcr.io/nvidia/k8s/cuda-sample:vectoradd-cuda10.2"
+const gpuEnabledAMI = "ami-0f71e237bb2ba34be" // Ubuntu 22.04 with GPU drivers
+
+// TestGPUSuite runs the end-to-end test suite for the GPU monitoring check on a GPU-enabled EC2 instance.
+func TestGPUSuite(t *testing.T) {
+	// Marked as flaky pending removal of unattended-upgrades in the AMI
+	flake.Mark(t)
+
+	provisioner := awshost.Provisioner(
+		awshost.WithEC2InstanceOptions(
+			ec2.WithInstanceType("g4dn.xlarge"),
+			ec2.WithAMI(gpuEnabledAMI, os.Ubuntu2204, os.AMD64Arch),
+		),
+		awshost.WithAgentOptions(
+			agentparams.WithIntegration("gpu.d", defaultGpuCheckConfig),
+			agentparams.WithSystemProbeConfig(defaultSysprobeConfig),
+		),
+		awshost.WithDocker(),
+	)
+
+	suiteParams := []e2e.SuiteOption{e2e.WithProvisioner(provisioner)}
+	if *devMode {
+		suiteParams = append(suiteParams, e2e.WithDevMode())
+	}
+
+	e2e.Run(t, &gpuSuite{}, suiteParams...)
+}
+
+func (v *gpuSuite) SetupSuite() {
+	v.BaseSuite.SetupSuite()
+
+	v.Env().RemoteHost.MustExecute(fmt.Sprintf("docker pull %s", vectorAddDockerImg))
+}
+
+// TODO: Extract this to common package?
service_discovery uses it too +type checkStatus struct { + CheckID string `json:"CheckID"` + CheckName string `json:"CheckName"` + CheckConfigSource string `json:"CheckConfigSource"` + ExecutionTimes []int `json:"ExecutionTimes"` + LastError string `json:"LastError"` +} + +type runnerStats struct { + Checks map[string]checkStatus `json:"Checks"` +} + +type collectorStatus struct { + RunnerStats runnerStats `json:"runnerStats"` +} + +func (v *gpuSuite) TestGPUCheckIsEnabled() { + statusOutput := v.Env().Agent.Client.Status(agentclient.WithArgs([]string{"collector", "--json"})) + + var status collectorStatus + err := json.Unmarshal([]byte(statusOutput.Content), &status) + v.Require().NoError(err, "failed to unmarshal agent status") + v.Require().Contains(status.RunnerStats.Checks, "gpu") + + gpuCheckStatus := status.RunnerStats.Checks["gpu"] + v.Require().Equal(gpuCheckStatus.LastError, "") +} diff --git a/test/new-e2e/tests/installer/host/systemd.go b/test/new-e2e/tests/installer/host/systemd.go index 4bf7725f3a12e..7633d0d04c0c6 100644 --- a/test/new-e2e/tests/installer/host/systemd.go +++ b/test/new-e2e/tests/installer/host/systemd.go @@ -106,7 +106,7 @@ func (h *Host) AssertSystemdEvents(since JournaldTimestamp, events SystemdEventS } lastSearchedEvents = searchedEvents return j == len(events.Events) - }, 30*time.Second, 1*time.Second) + }, 60*time.Second, 1*time.Second) if !success { logs := h.journaldLogsSince(since) diff --git a/test/new-e2e/tests/installer/unix/all_packages_test.go b/test/new-e2e/tests/installer/unix/all_packages_test.go index 7f2360f89a1d2..0c7909fc6373f 100644 --- a/test/new-e2e/tests/installer/unix/all_packages_test.go +++ b/test/new-e2e/tests/installer/unix/all_packages_test.go @@ -229,6 +229,11 @@ func envForceVersion(pkg, version string) string { } func (s *packageBaseSuite) Purge() { + // Reset the systemctl failed counter, best effort as they may not be loaded + for _, service := range []string{agentUnit, agentUnitXP, traceUnit, traceUnitXP, processUnit, processUnitXP, probeUnit, probeUnitXP, securityUnit, securityUnitXP} { + s.Env().RemoteHost.Execute(fmt.Sprintf("sudo systemctl reset-failed %s", service)) + } + s.Env().RemoteHost.MustExecute("sudo apt-get remove -y --purge datadog-installer || sudo yum remove -y datadog-installer || sudo zypper remove -y datadog-installer") } diff --git a/test/new-e2e/tests/installer/unix/package_agent_test.go b/test/new-e2e/tests/installer/unix/package_agent_test.go index 30e4f52630a0d..571d7a87eae41 100644 --- a/test/new-e2e/tests/installer/unix/package_agent_test.go +++ b/test/new-e2e/tests/installer/unix/package_agent_test.go @@ -348,6 +348,10 @@ func (s *packageAgentSuite) TestExperimentStopped() { s.host.Run(`sudo systemctl start datadog-agent-exp --no-block`) // ensure experiment is running + s.host.WaitForUnitActive( + "datadog-agent-trace-exp.service", + "datadog-agent-process-exp.service", + ) s.host.AssertSystemdEvents(timestamp, host.SystemdEvents().Started(traceUnitXP)) s.host.AssertSystemdEvents(timestamp, host.SystemdEvents().Started(processUnitXP)) s.host.AssertSystemdEvents(timestamp, host.SystemdEvents().Skipped(securityUnitXP)) @@ -370,7 +374,7 @@ func (s *packageAgentSuite) TestExperimentStopped() { Unordered(host.SystemdEvents(). Started(traceUnit). Started(processUnit). - SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible). + SkippedIf(probeUnit, s.installMethod != InstallMethodAnsible). 
Skipped(securityUnit), ), ) diff --git a/test/new-e2e/tests/npm/eks_1host_test.go b/test/new-e2e/tests/npm/eks_1host_test.go index e882d3387304c..12f86c5fd37e2 100644 --- a/test/new-e2e/tests/npm/eks_1host_test.go +++ b/test/new-e2e/tests/npm/eks_1host_test.go @@ -17,6 +17,7 @@ import ( kubeComp "github.com/DataDog/test-infra-definitions/components/kubernetes" "github.com/DataDog/test-infra-definitions/resources/aws" "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" + "github.com/DataDog/test-infra-definitions/scenarios/aws/eks" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" @@ -73,7 +74,7 @@ func eksHttpbinEnvProvisioner(opts ...envkube.ProvisionerOption) e2e.PulumiEnvRu provisionerOpts := []envkube.ProvisionerOption{ envkube.WithAwsEnv(&awsEnv), - envkube.WithEKSLinuxNodeGroup(), + envkube.WithEKSOptions(eks.WithLinuxNodeGroup()), envkube.WithAgentOptions(kubernetesagentparams.WithHelmValues(systemProbeConfigNPMHelmValues)), envkube.WithWorkloadApp(npmToolsWorkload), } diff --git a/test/new-e2e/tests/otel/otel-agent/infraattributes_eks_test.go b/test/new-e2e/tests/otel/otel-agent/infraattributes_eks_test.go index a94e4ad83de88..8e9fd649ed124 100644 --- a/test/new-e2e/tests/otel/otel-agent/infraattributes_eks_test.go +++ b/test/new-e2e/tests/otel/otel-agent/infraattributes_eks_test.go @@ -11,6 +11,7 @@ import ( "testing" "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams" + "github.com/DataDog/test-infra-definitions/scenarios/aws/eks" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" @@ -33,7 +34,7 @@ datadog: containerCollectUsingFiles: false ` t.Parallel() - e2e.Run(t, &iaEKSTestSuite{}, e2e.WithProvisioner(awskubernetes.EKSProvisioner(awskubernetes.WithEKSLinuxNodeGroup(), awskubernetes.WithAgentOptions(kubernetesagentparams.WithoutDualShipping(), kubernetesagentparams.WithHelmValues(values), kubernetesagentparams.WithOTelAgent(), kubernetesagentparams.WithOTelConfig(iaConfig))))) + e2e.Run(t, &iaEKSTestSuite{}, e2e.WithProvisioner(awskubernetes.EKSProvisioner(awskubernetes.WithEKSOptions(eks.WithLinuxNodeGroup()), awskubernetes.WithAgentOptions(kubernetesagentparams.WithoutDualShipping(), kubernetesagentparams.WithHelmValues(values), kubernetesagentparams.WithOTelAgent(), kubernetesagentparams.WithOTelConfig(iaConfig))))) } var eksParams = utils.IAParams{ diff --git a/test/new-e2e/tests/otel/utils/pipelines_utils.go b/test/new-e2e/tests/otel/utils/pipelines_utils.go index 9365d487b3e36..332137d5566de 100644 --- a/test/new-e2e/tests/otel/utils/pipelines_utils.go +++ b/test/new-e2e/tests/otel/utils/pipelines_utils.go @@ -390,7 +390,7 @@ func TestCalendarApp(s OTelTestSuite) { logs, err := s.Env().FakeIntake.Client().FilterLogs(calendarService, fakeintake.WithMessageContaining(logBody)) assert.NoError(c, err) assert.NotEmpty(c, logs) - }, 60*time.Minute, 10*time.Second) + }, 30*time.Minute, 10*time.Second) } func createCalendarApp(ctx context.Context, s OTelTestSuite) { @@ -465,7 +465,7 @@ func createCalendarApp(ctx context.Context, s OTelTestSuite) { Spec: corev1.PodSpec{ Containers: []corev1.Container{{ Name: name, - Image: "datadog/opentelemetry-examples:calendar-go-rest-0.15", + Image: "ghcr.io/datadog/apps-calendar-go:main", ImagePullPolicy: "IfNotPresent", Ports: []corev1.ContainerPort{{ Name: "http", diff --git a/test/regression/cases/quality_gate_idle/datadog-agent/datadog.yaml 
b/test/regression/cases/quality_gate_idle/datadog-agent/datadog.yaml new file mode 100644 index 0000000000000..1b9b15d83f17a --- /dev/null +++ b/test/regression/cases/quality_gate_idle/datadog-agent/datadog.yaml @@ -0,0 +1,12 @@ +auth_token_file_path: /tmp/agent-auth-token + +dd_url: http://127.0.0.1:9091 +process_config.process_dd_url: http://localhost:9092 + +# Disable cloud detection. This stops the Agent from poking around the +# execution environment & network. This is particularly important if the target +# has network access. +cloud_provider_metadata: [] + +telemetry.enabled: true +telemetry.checks: '*' diff --git a/test/regression/cases/quality_gate_idle/experiment.yaml b/test/regression/cases/quality_gate_idle/experiment.yaml new file mode 100644 index 0000000000000..f0cd01ba86e3b --- /dev/null +++ b/test/regression/cases/quality_gate_idle/experiment.yaml @@ -0,0 +1,39 @@ +# Agent 'out of the box' idle experiment. Represents an agent install with the +# default configuration and no active workload. + +optimization_goal: memory +erratic: false + +target: + name: datadog-agent + command: /bin/entrypoint.sh + + environment: + DD_API_KEY: 00000001 + DD_HOSTNAME: smp-regression + + profiling_environment: + DD_INTERNAL_PROFILING_BLOCK_PROFILE_RATE: 10000 + DD_INTERNAL_PROFILING_CPU_DURATION: 1m + DD_INTERNAL_PROFILING_DELTA_PROFILES: true + DD_INTERNAL_PROFILING_ENABLED: true + DD_INTERNAL_PROFILING_ENABLE_GOROUTINE_STACKTRACES: true + DD_INTERNAL_PROFILING_MUTEX_PROFILE_FRACTION: 10 + DD_INTERNAL_PROFILING_PERIOD: 1m + DD_INTERNAL_PROFILING_UNIX_SOCKET: /var/run/datadog/apm.socket + DD_PROFILING_EXECUTION_TRACE_ENABLED: true + DD_PROFILING_EXECUTION_TRACE_PERIOD: 1m + DD_PROFILING_WAIT_PROFILE: true + + DD_INTERNAL_PROFILING_EXTRA_TAGS: experiment:quality_gate_idle + +checks: + - name: memory_usage + description: "Memory usage quality gate. This puts a bound on the total agent memory usage." 
+ bounds: + series: total_rss_bytes + upper_bound: "430.0 MiB" + +report_links: + - text: "bounds checks dashboard" + link: "https://app.datadoghq.com/dashboard/vz3-jd5-bdi?fromUser=true&refresh_mode=paused&tpl_var_experiment%5B0%5D={{ experiment }}&tpl_var_job_id%5B0%5D={{ job_id }}&tpl_var_run-id%5B0%5D={{ job_id }}&view=spans&from_ts={{ start_time_ms }}&to_ts={{ end_time_ms }}&live=false" diff --git a/test/regression/cases/quality_gate_idle/lading/lading.yaml b/test/regression/cases/quality_gate_idle/lading/lading.yaml new file mode 100644 index 0000000000000..5e2eb2566ef45 --- /dev/null +++ b/test/regression/cases/quality_gate_idle/lading/lading.yaml @@ -0,0 +1,176 @@ +generator: [] + +blackhole: + - http: + binding_addr: "127.0.0.1:9091" + - http: + binding_addr: "127.0.0.1:9092" + +target_metrics: + - prometheus: #core agent telemetry + uri: "http://127.0.0.1:5000/telemetry" + tags: + sub_agent: "core" + - prometheus: #process agent telemetry + uri: "http://127.0.0.1:6062/telemetry" + tags: + sub_agent: "process" + - expvar: #trace agent telemetry + uri: "http://127.0.0.1:5012/debug/vars" + vars: + - "/Event" + - "/ServiceCheck" + - "/check_run_v1" + - "/cmdline" + - "/compressor/BytesIn" + - "/compressor/BytesOut" + - "/compressor/TotalCompressCycles" + - "/compressor/TotalPayloads" + - "/connections" + - "/container" + - "/events_v2" + - "/forwarder/APIKeyFailure" + - "/forwarder/APIKeyStatus" + - "/forwarder/FileStorage/CurrentSizeInBytes" + - "/forwarder/FileStorage/DeserializeCount" + - "/forwarder/FileStorage/DeserializeErrorsCount" + - "/forwarder/FileStorage/DeserializeTransactionsCount" + - "/forwarder/FileStorage/FileSize" + - "/forwarder/FileStorage/FilesCount" + - "/forwarder/FileStorage/FilesRemovedCount" + - "/forwarder/FileStorage/PointsDroppedCount" + - "/forwarder/FileStorage/SerializeCount" + - "/forwarder/FileStorage/StartupReloadedRetryFilesCount" + - "/forwarder/RemovalPolicy/FilesFromUnknownDomainCount" + - "/forwarder/RemovalPolicy/NewRemovalPolicyCount" + - "/forwarder/RemovalPolicy/OutdatedFilesCount" + - "/forwarder/RemovalPolicy/RegisteredDomainCount" + - "/forwarder/TransactionContainer/CurrentMemSizeInBytes" + - "/forwarder/TransactionContainer/ErrorsCount" + - "/forwarder/TransactionContainer/PointsDroppedCount" + - "/forwarder/TransactionContainer/TransactionsCount" + - "/forwarder/TransactionContainer/TransactionsDroppedCount" + - "/forwarder/Transactions/Cluster" + - "/forwarder/Transactions/ClusterRole" + - "/forwarder/Transactions/ClusterRoleBinding" + - "/forwarder/Transactions/ConnectionEvents/ConnectSuccess" + - "/forwarder/Transactions/ConnectionEvents/DNSSuccess" + - "/forwarder/Transactions/CronJob" + - "/forwarder/Transactions/CustomResource" + - "/forwarder/Transactions/CustomResourceDefinition" + - "/forwarder/Transactions/DaemonSet" + - "/forwarder/Transactions/Deployment" + - "/forwarder/Transactions/Dropped" + - "/forwarder/Transactions/DroppedByEndpoint" + - "/forwarder/Transactions/ECSTask" + - "/forwarder/Transactions/Errors" + - "/forwarder/Transactions/ErrorsByType/ConnectionErrors" + - "/forwarder/Transactions/ErrorsByType/DNSErrors" + - "/forwarder/Transactions/ErrorsByType/SentRequestErrors" + - "/forwarder/Transactions/ErrorsByType/TLSErrors" + - "/forwarder/Transactions/ErrorsByType/WroteRequestErrors" + - "/forwarder/Transactions/HTTPErrors" + - "/forwarder/Transactions/HTTPErrorsByCode" + - "/forwarder/Transactions/HighPriorityQueueFull" + - "/forwarder/Transactions/HorizontalPodAutoscaler" + - 
"/forwarder/Transactions/Ingress" + - "/forwarder/Transactions/InputBytesByEndpoint" + - "/forwarder/Transactions/InputCountByEndpoint" + - "/forwarder/Transactions/Job" + - "/forwarder/Transactions/LimitRange" + - "/forwarder/Transactions/Namespace" + - "/forwarder/Transactions/NetworkPolicy" + - "/forwarder/Transactions/Node" + - "/forwarder/Transactions/OrchestratorManifest" + - "/forwarder/Transactions/PersistentVolume" + - "/forwarder/Transactions/PersistentVolumeClaim" + - "/forwarder/Transactions/Pod" + - "/forwarder/Transactions/ReplicaSet" + - "/forwarder/Transactions/Requeued" + - "/forwarder/Transactions/RequeuedByEndpoint" + - "/forwarder/Transactions/Retried" + - "/forwarder/Transactions/RetriedByEndpoint" + - "/forwarder/Transactions/RetryQueueSize" + - "/forwarder/Transactions/Role" + - "/forwarder/Transactions/RoleBinding" + - "/forwarder/Transactions/Service" + - "/forwarder/Transactions/ServiceAccount" + - "/forwarder/Transactions/StatefulSet" + - "/forwarder/Transactions/StorageClass" + - "/forwarder/Transactions/Success" + - "/forwarder/Transactions/SuccessByEndpoint/check_run_v1" + - "/forwarder/Transactions/SuccessByEndpoint/connections" + - "/forwarder/Transactions/SuccessByEndpoint/container" + - "/forwarder/Transactions/SuccessByEndpoint/events_v2" + - "/forwarder/Transactions/SuccessByEndpoint/host_metadata_v2" + - "/forwarder/Transactions/SuccessByEndpoint/intake" + - "/forwarder/Transactions/SuccessByEndpoint/orchestrator" + - "/forwarder/Transactions/SuccessByEndpoint/process" + - "/forwarder/Transactions/SuccessByEndpoint/rtcontainer" + - "/forwarder/Transactions/SuccessByEndpoint/rtprocess" + - "/forwarder/Transactions/SuccessByEndpoint/series_v1" + - "/forwarder/Transactions/SuccessByEndpoint/series_v2" + - "/forwarder/Transactions/SuccessByEndpoint/services_checks_v2" + - "/forwarder/Transactions/SuccessByEndpoint/sketches_v1" + - "/forwarder/Transactions/SuccessByEndpoint/sketches_v2" + - "/forwarder/Transactions/SuccessByEndpoint/validate_v1" + - "/forwarder/Transactions/SuccessBytesByEndpoint" + - "/forwarder/Transactions/VerticalPodAutoscaler" + - "/host_metadata_v2" + - "/hostname/errors" + - "/hostname/provider" + - "/intake" + - "/jsonstream/CompressorLocks" + - "/jsonstream/ItemDrops" + - "/jsonstream/PayloadFulls" + - "/jsonstream/TotalCalls" + - "/jsonstream/TotalItems" + - "/jsonstream/TotalLockTime" + - "/jsonstream/TotalSerializationTime" + - "/jsonstream/WriteItemErrors" + - "/kubeletQueries" + - "/orchestrator" + - "/pid" + - "/process" + - "/rtcontainer" + - "/rtprocess" + - "/serializer/SendEventsErrItemTooBigs" + - "/serializer/SendEventsErrItemTooBigsFallback" + - "/series" + - "/series_v1" + - "/series_v2" + - "/services_checks_v2" + - "/sketch_series/ItemTooBig" + - "/sketch_series/PayloadFull" + - "/sketch_series/UnexpectedItemDrops" + - "/sketches_v1" + - "/sketches_v2" + - "/splitter/NotTooBig" + - "/splitter/PayloadDrops" + - "/splitter/TooBig" + - "/splitter/TotalLoops" + - "/stats_writer/Bytes" + - "/stats_writer/ClientPayloads" + - "/stats_writer/Errors" + - "/stats_writer/Payloads" + - "/stats_writer/Retries" + - "/stats_writer/Splits" + - "/stats_writer/StatsBuckets" + - "/stats_writer/StatsEntries" + - "/trace_writer/Bytes" + - "/trace_writer/BytesUncompressed" + - "/trace_writer/Errors" + - "/trace_writer/Events" + - "/trace_writer/Payloads" + - "/trace_writer/Retries" + - "/trace_writer/SingleMaxSize" + - "/trace_writer/Spans" + - "/trace_writer/Traces" + - "/uptime" + - "/validate_v1" + - "/version/Version" + - 
"/version/GitCommit" + - "/watchdog/CPU/UserAvg" + - "/watchdog/Mem/Alloc" + tags: + sub_agent: "trace" diff --git a/test/regression/cases/quality_gate_idle_all_features/datadog-agent/datadog.yaml b/test/regression/cases/quality_gate_idle_all_features/datadog-agent/datadog.yaml new file mode 100644 index 0000000000000..1960b9b64d8f0 --- /dev/null +++ b/test/regression/cases/quality_gate_idle_all_features/datadog-agent/datadog.yaml @@ -0,0 +1,74 @@ +auth_token_file_path: /tmp/agent-auth-token +hostname: smp-regression + +dd_url: http://127.0.0.1:9092 + +confd_path: /etc/datadog-agent/conf.d + +# Disable cloud detection. This stops the Agent from poking around the +# execution environment & network. This is particularly important if the target +# has network access. +cloud_provider_metadata: [] + +dogstatsd_socket: '/tmp/dsd.socket' + +logs_enabled: true + +apm_config: + enabled: true + +process_config: + process_collection: + enabled: true + container_collection: + enabled: true + +network_path: + connections_monitoring: + enabled: true + +runtime_security_config: + ## Set to true to enable Threat Detection + enabled: true + +cluster_checks: + enabled: true + +otlp_config: + metrics: + enabled: true + traces: + enabled: true + logs: + enabled: true + +system_probe_config: + enabled: true + +network_config: + enabled: true + +# Per Cloud Security Management setup documentation +# https://docs.datadoghq.com/security/cloud_security_management/setup/agent/linux/ +remote_configuration: + # SMP environment does not support remote config currently. + enabled: false + +compliance_config: + ## Set to true to enable CIS benchmarks for Misconfigurations. + enabled: true + host_benchmarks: + enabled: true + +# Vulnerabilities are evaluated and scanned against your containers and hosts every hour. +sbom: + enabled: true + # Set to true to enable Container Vulnerability Management + container_image: + enabled: true + # Set to true to enable Host Vulnerability Management + host: + enabled: true + +container_image: + enabled: true diff --git a/test/regression/cases/quality_gate_idle_all_features/datadog-agent/security-agent.yaml b/test/regression/cases/quality_gate_idle_all_features/datadog-agent/security-agent.yaml new file mode 100644 index 0000000000000..d9ce27c518a1a --- /dev/null +++ b/test/regression/cases/quality_gate_idle_all_features/datadog-agent/security-agent.yaml @@ -0,0 +1,13 @@ +# Per https://docs.datadoghq.com/security/cloud_security_management/setup/agent/linux/ +runtime_security_config: + ## @param enabled - boolean - optional - default: false + ## Set to true to enable Threat Detection + enabled: true + +compliance_config: + ## @param enabled - boolean - optional - default: false + ## Set to true to enable CIS benchmarks for Misconfigurations. 
+ # + enabled: true + host_benchmarks: + enabled: true diff --git a/test/regression/cases/quality_gate_idle_all_features/datadog-agent/system-probe.yaml b/test/regression/cases/quality_gate_idle_all_features/datadog-agent/system-probe.yaml new file mode 100644 index 0000000000000..a7da3c9140d50 --- /dev/null +++ b/test/regression/cases/quality_gate_idle_all_features/datadog-agent/system-probe.yaml @@ -0,0 +1,10 @@ +# Per https://docs.datadoghq.com/security/cloud_security_management/setup/agent/linux/ + +runtime_security_config: + ## @param enabled - boolean - optional - default: false + ## Set to true to enable Threat Detection + enabled: true + + remote_configuration: + ## @param enabled - boolean - optional - default: false + enabled: true diff --git a/test/regression/cases/quality_gate_idle_all_features/experiment.yaml b/test/regression/cases/quality_gate_idle_all_features/experiment.yaml new file mode 100644 index 0000000000000..aec1d9bf04f22 --- /dev/null +++ b/test/regression/cases/quality_gate_idle_all_features/experiment.yaml @@ -0,0 +1,52 @@ +# Agent 'all features enabled' idle experiment. Represents an agent install with +# all sub-agents enabled in configuration and no active workload. + +optimization_goal: memory +erratic: false + +target: + name: datadog-agent + command: /bin/entrypoint.sh + + environment: + DD_TELEMETRY_ENABLED: true + DD_API_KEY: 00000001 + DD_HOSTNAME: smp-regression + DD_DD_URL: http://127.0.0.1:9092 + + profiling_environment: + # internal profiling + DD_INTERNAL_PROFILING_ENABLED: true + DD_SYSTEM_PROBE_INTERNAL_PROFILING_ENABLED: true + # run all the time + DD_SYSTEM_PROBE_INTERNAL_PROFILING_PERIOD: 1m + DD_INTERNAL_PROFILING_PERIOD: 1m + DD_SYSTEM_PROBE_INTERNAL_PROFILING_CPU_DURATION: 1m + DD_INTERNAL_PROFILING_CPU_DURATION: 1m + # destination + DD_INTERNAL_PROFILING_UNIX_SOCKET: /var/run/datadog/apm.socket + DD_SYSTEM_PROBE_CONFIG_INTERNAL_PROFILING_UNIX_SOCKET: /var/run/datadog/apm.socket + # tags + DD_INTERNAL_PROFILING_EXTRA_TAGS: experiment:quality_gate_idle_all_features + DD_SYSTEM_PROBE_CONFIG_INTERNAL_PROFILING_EXTRA_TAGS: experiment:quality_gate_idle_all_features + + DD_INTERNAL_PROFILING_BLOCK_PROFILE_RATE: 10000 + DD_INTERNAL_PROFILING_DELTA_PROFILES: true + DD_INTERNAL_PROFILING_ENABLE_GOROUTINE_STACKTRACES: true + DD_INTERNAL_PROFILING_MUTEX_PROFILE_FRACTION: 10 + + # ddprof options + DD_PROFILING_EXECUTION_TRACE_ENABLED: true + DD_PROFILING_EXECUTION_TRACE_PERIOD: 1m + DD_PROFILING_WAIT_PROFILE: true + +checks: + - name: memory_usage + description: "Memory usage quality gate. This puts a bound on the total agent memory usage." 
+ bounds: + series: total_rss_bytes + upper_bound: "785.0 MiB" + +report_links: + - text: "bounds checks dashboard" + link: "https://app.datadoghq.com/dashboard/vz3-jd5-bdi?fromUser=true&refresh_mode=paused&tpl_var_experiment%5B0%5D={{ experiment }}&tpl_var_job_id%5B0%5D={{ job_id }}&tpl_var_run-id%5B0%5D={{ job_id }}&view=spans&from_ts={{ start_time_ms }}&to_ts={{ end_time_ms }}&live=false" diff --git a/test/regression/cases/quality_gate_idle_all_features/lading/lading.yaml b/test/regression/cases/quality_gate_idle_all_features/lading/lading.yaml new file mode 100644 index 0000000000000..52888afb7176d --- /dev/null +++ b/test/regression/cases/quality_gate_idle_all_features/lading/lading.yaml @@ -0,0 +1,177 @@ +generator: [] + +blackhole: + - http: + binding_addr: "127.0.0.1:9091" + - http: + binding_addr: "127.0.0.1:9092" + +target_metrics: + - prometheus: #core agent telemetry + uri: "http://127.0.0.1:5000/telemetry" + tags: + sub_agent: "core" + - prometheus: #process agent telemetry + uri: "http://127.0.0.1:6062/telemetry" + tags: + sub_agent: "process" + - expvar: #trace agent telemetry + uri: "http://127.0.0.1:5012/debug/vars" + vars: + - "/Event" + - "/ServiceCheck" + - "/check_run_v1" + - "/cmdline" + - "/compressor/BytesIn" + - "/compressor/BytesOut" + - "/compressor/TotalCompressCycles" + - "/compressor/TotalPayloads" + - "/connections" + - "/container" + - "/events_v2" + - "/forwarder/APIKeyFailure" + - "/forwarder/APIKeyStatus" + - "/forwarder/FileStorage/CurrentSizeInBytes" + - "/forwarder/FileStorage/DeserializeCount" + - "/forwarder/FileStorage/DeserializeErrorsCount" + - "/forwarder/FileStorage/DeserializeTransactionsCount" + - "/forwarder/FileStorage/FileSize" + - "/forwarder/FileStorage/FilesCount" + - "/forwarder/FileStorage/FilesRemovedCount" + - "/forwarder/FileStorage/PointsDroppedCount" + - "/forwarder/FileStorage/SerializeCount" + - "/forwarder/FileStorage/StartupReloadedRetryFilesCount" + - "/forwarder/RemovalPolicy/FilesFromUnknownDomainCount" + - "/forwarder/RemovalPolicy/NewRemovalPolicyCount" + - "/forwarder/RemovalPolicy/OutdatedFilesCount" + - "/forwarder/RemovalPolicy/RegisteredDomainCount" + - "/forwarder/TransactionContainer/CurrentMemSizeInBytes" + - "/forwarder/TransactionContainer/ErrorsCount" + - "/forwarder/TransactionContainer/PointsDroppedCount" + - "/forwarder/TransactionContainer/TransactionsCount" + - "/forwarder/TransactionContainer/TransactionsDroppedCount" + - "/forwarder/Transactions/Cluster" + - "/forwarder/Transactions/ClusterRole" + - "/forwarder/Transactions/ClusterRoleBinding" + - "/forwarder/Transactions/ConnectionEvents/ConnectSuccess" + - "/forwarder/Transactions/ConnectionEvents/DNSSuccess" + - "/forwarder/Transactions/CronJob" + - "/forwarder/Transactions/CustomResource" + - "/forwarder/Transactions/CustomResourceDefinition" + - "/forwarder/Transactions/DaemonSet" + - "/forwarder/Transactions/Deployment" + - "/forwarder/Transactions/Dropped" + - "/forwarder/Transactions/DroppedByEndpoint" + - "/forwarder/Transactions/ECSTask" + - "/forwarder/Transactions/Errors" + - "/forwarder/Transactions/ErrorsByType/ConnectionErrors" + - "/forwarder/Transactions/ErrorsByType/DNSErrors" + - "/forwarder/Transactions/ErrorsByType/SentRequestErrors" + - "/forwarder/Transactions/ErrorsByType/TLSErrors" + - "/forwarder/Transactions/ErrorsByType/WroteRequestErrors" + - "/forwarder/Transactions/HTTPErrors" + - "/forwarder/Transactions/HTTPErrorsByCode" + - "/forwarder/Transactions/HighPriorityQueueFull" + - 
"/forwarder/Transactions/HorizontalPodAutoscaler" + - "/forwarder/Transactions/Ingress" + - "/forwarder/Transactions/InputBytesByEndpoint" + - "/forwarder/Transactions/InputCountByEndpoint" + - "/forwarder/Transactions/Job" + - "/forwarder/Transactions/LimitRange" + - "/forwarder/Transactions/Namespace" + - "/forwarder/Transactions/NetworkPolicy" + - "/forwarder/Transactions/Node" + - "/forwarder/Transactions/OrchestratorManifest" + - "/forwarder/Transactions/PersistentVolume" + - "/forwarder/Transactions/PersistentVolumeClaim" + - "/forwarder/Transactions/Pod" + - "/forwarder/Transactions/ReplicaSet" + - "/forwarder/Transactions/Requeued" + - "/forwarder/Transactions/RequeuedByEndpoint" + - "/forwarder/Transactions/Retried" + - "/forwarder/Transactions/RetriedByEndpoint" + - "/forwarder/Transactions/RetryQueueSize" + - "/forwarder/Transactions/Role" + - "/forwarder/Transactions/RoleBinding" + - "/forwarder/Transactions/Service" + - "/forwarder/Transactions/ServiceAccount" + - "/forwarder/Transactions/StatefulSet" + - "/forwarder/Transactions/StorageClass" + - "/forwarder/Transactions/Success" + - "/forwarder/Transactions/SuccessByEndpoint/check_run_v1" + - "/forwarder/Transactions/SuccessByEndpoint/connections" + - "/forwarder/Transactions/SuccessByEndpoint/container" + - "/forwarder/Transactions/SuccessByEndpoint/events_v2" + - "/forwarder/Transactions/SuccessByEndpoint/host_metadata_v2" + - "/forwarder/Transactions/SuccessByEndpoint/intake" + - "/forwarder/Transactions/SuccessByEndpoint/orchestrator" + - "/forwarder/Transactions/SuccessByEndpoint/process" + - "/forwarder/Transactions/SuccessByEndpoint/rtcontainer" + - "/forwarder/Transactions/SuccessByEndpoint/rtprocess" + - "/forwarder/Transactions/SuccessByEndpoint/series_v1" + - "/forwarder/Transactions/SuccessByEndpoint/series_v2" + - "/forwarder/Transactions/SuccessByEndpoint/services_checks_v2" + - "/forwarder/Transactions/SuccessByEndpoint/sketches_v1" + - "/forwarder/Transactions/SuccessByEndpoint/sketches_v2" + - "/forwarder/Transactions/SuccessByEndpoint/validate_v1" + - "/forwarder/Transactions/SuccessBytesByEndpoint" + - "/forwarder/Transactions/VerticalPodAutoscaler" + - "/host_metadata_v2" + - "/hostname/errors" + - "/hostname/provider" + - "/intake" + - "/jsonstream/CompressorLocks" + - "/jsonstream/ItemDrops" + - "/jsonstream/PayloadFulls" + - "/jsonstream/TotalCalls" + - "/jsonstream/TotalItems" + - "/jsonstream/TotalLockTime" + - "/jsonstream/TotalSerializationTime" + - "/jsonstream/WriteItemErrors" + - "/kubeletQueries" + - "/orchestrator" + - "/pid" + - "/process" + - "/rtcontainer" + - "/rtprocess" + - "/serializer/SendEventsErrItemTooBigs" + - "/serializer/SendEventsErrItemTooBigsFallback" + - "/series" + - "/series_v1" + - "/series_v2" + - "/services_checks_v2" + - "/sketch_series/ItemTooBig" + - "/sketch_series/PayloadFull" + - "/sketch_series/UnexpectedItemDrops" + - "/sketches_v1" + - "/sketches_v2" + - "/splitter/NotTooBig" + - "/splitter/PayloadDrops" + - "/splitter/TooBig" + - "/splitter/TotalLoops" + - "/stats_writer/Bytes" + - "/stats_writer/ClientPayloads" + - "/stats_writer/Errors" + - "/stats_writer/Payloads" + - "/stats_writer/Retries" + - "/stats_writer/Splits" + - "/stats_writer/StatsBuckets" + - "/stats_writer/StatsEntries" + - "/trace_writer/Bytes" + - "/trace_writer/BytesUncompressed" + - "/trace_writer/Errors" + - "/trace_writer/Events" + - "/trace_writer/Payloads" + - "/trace_writer/Retries" + - "/trace_writer/SingleMaxSize" + - "/trace_writer/Spans" + - "/trace_writer/Traces" + - "/uptime" + - 
"/validate_v1" + - "/version/Version" + - "/version/GitCommit" + - "/watchdog/CPU/UserAvg" + - "/watchdog/Mem/Alloc" + tags: + sub_agent: "trace" + diff --git a/tools/ci/docker-login.ps1 b/tools/ci/docker-login.ps1 index 840b6b786492d..39f03f4fb7632 100644 --- a/tools/ci/docker-login.ps1 +++ b/tools/ci/docker-login.ps1 @@ -9,12 +9,14 @@ If ($lastExitCode -ne "0") { $tmpfile = [System.IO.Path]::GetTempFileName() & "C:\mnt\tools\ci\fetch_secret.ps1" -parameterName "$Env:DOCKER_REGISTRY_LOGIN" -tempFile "$tmpfile" If ($lastExitCode -ne "0") { - throw "Previous command returned $lastExitCode" + Write-Host "Previous command returned $lastExitCode" + exit "$lastExitCode" } $DOCKER_REGISTRY_LOGIN = $(cat "$tmpfile") & "C:\mnt\tools\ci\fetch_secret.ps1" -parameterName "$Env:DOCKER_REGISTRY_PWD" -tempFile "$tmpfile" If ($lastExitCode -ne "0") { - throw "Previous command returned $lastExitCode" + Write-Host "Previous command returned $lastExitCode" + exit "$lastExitCode" } $DOCKER_REGISTRY_PWD = $(cat "$tmpfile") Remove-Item "$tmpfile" diff --git a/tools/windows/DatadogAgentInstaller/WixSetup/Datadog Agent/AgentBinaries.cs b/tools/windows/DatadogAgentInstaller/WixSetup/Datadog Agent/AgentBinaries.cs index 5db25f761e0ef..7c6bf6ba55844 100644 --- a/tools/windows/DatadogAgentInstaller/WixSetup/Datadog Agent/AgentBinaries.cs +++ b/tools/windows/DatadogAgentInstaller/WixSetup/Datadog Agent/AgentBinaries.cs @@ -16,27 +16,9 @@ public class AgentBinaries public string SecurityAgent => $@"{_binSource}\agent\security-agent.exe"; public string LibDatadogAgentThree => $@"{_binSource}\agent\libdatadog-agent-three.dll"; - public string[] PythonThreeBinaries; - public string[] PythonTwoBinaries; - - public string LibDatadogAgentTwo => $@"{_binSource}\agent\libdatadog-agent-two.dll"; - public AgentBinaries(string binSource, string installerSource) { _binSource = binSource; - PythonThreeBinaries = new[] - { - $@"{installerSource}\embedded3\python.exe", - $@"{installerSource}\embedded3\python3.dll", - $@"{installerSource}\embedded3\python312.dll", - $@"{installerSource}\embedded3\pythonw.exe" - }; - PythonTwoBinaries = new[] - { - $@"{installerSource}\embedded2\python.exe", - $@"{installerSource}\embedded2\python27.dll", - $@"{installerSource}\embedded2\pythonw.exe" - }; } } } diff --git a/tools/windows/DatadogAgentInstaller/WixSetup/Datadog Agent/AgentInstaller.cs b/tools/windows/DatadogAgentInstaller/WixSetup/Datadog Agent/AgentInstaller.cs index 3d8bbb7f6b858..46d7807bfc4ef 100644 --- a/tools/windows/DatadogAgentInstaller/WixSetup/Datadog Agent/AgentInstaller.cs +++ b/tools/windows/DatadogAgentInstaller/WixSetup/Datadog Agent/AgentInstaller.cs @@ -37,7 +37,6 @@ public class AgentInstaller : IWixProjectEvents, IMsiInstallerProject private readonly AgentBinaries _agentBinaries; private readonly AgentFeatures _agentFeatures = new(); - private readonly AgentPython _agentPython = new(); private readonly AgentVersion _agentVersion; private readonly AgentCustomActions _agentCustomActions = new(); private readonly AgentInstallerUI _agentInstallerUi; @@ -363,10 +362,6 @@ private Dir CreateProgramFilesFolder() new DirFiles($@"{InstallerSource}\*.txt"), new CompressedDir(this, "embedded3", $@"{InstallerSource}\embedded3") ); - if (_agentPython.IncludePython2) - { - datadogAgentFolder.AddFile(new CompressedDir(this, "embedded2", $@"{InstallerSource}\embedded2")); - } // Recursively delete/backup all files/folders in these paths, they will be restored // on rollback. 
By default WindowsInstaller only removes the files it tracks, and these paths
@@ -549,10 +544,6 @@ private Dir CreateBinFolder()
                 new WixSharp.File(_agentBinaries.LibDatadogAgentThree)
             );
 
-            if (_agentPython.IncludePython2)
-            {
-                targetBinFolder.AddFile(new WixSharp.File(_agentBinaries.LibDatadogAgentTwo));
-            }
             return targetBinFolder;
         }
diff --git a/tools/windows/DatadogAgentInstaller/WixSetup/Datadog Agent/AgentPython.cs b/tools/windows/DatadogAgentInstaller/WixSetup/Datadog Agent/AgentPython.cs
deleted file mode 100644
index 53012518f09bd..0000000000000
--- a/tools/windows/DatadogAgentInstaller/WixSetup/Datadog Agent/AgentPython.cs
+++ /dev/null
@@ -1,28 +0,0 @@
-using System;
-using System.Linq;
-
-namespace WixSetup.Datadog_Agent
-{
-    public class AgentPython
-    {
-        public string[] Runtimes { get; }
-
-        public bool IncludePython2 { get; }
-
-        public AgentPython()
-        {
-            Runtimes = new[] { "3" };
-            var pyRuntimesEnv = Environment.GetEnvironmentVariable("PY_RUNTIMES");
-            Console.WriteLine($"Detected Python runtimes: {pyRuntimesEnv}");
-            if (pyRuntimesEnv != null)
-            {
-                Runtimes = pyRuntimesEnv.Split(',');
-                if (Runtimes.Any(runtime => runtime.Trim() == "2"))
-                {
-                    Console.WriteLine("-> Including Python 2 runtime");
-                    IncludePython2 = true;
-                }
-            }
-        }
-    }
-}
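
The TODO left in gpu_test.go ("Extract this to common package? service_discovery uses it too") is worth acting on: the `checkStatus`, `runnerStats`, and `collectorStatus` types are now declared in two test suites. Below is a minimal sketch of what the extracted helper could look like. The package name `collectorstatus` and the `ParseCheckStatus` function are hypothetical placeholders for illustration; only the struct shapes come from the code in this diff.

```go
// Package collectorstatus is a hypothetical shared location (name assumed,
// not part of this change) for the types that the gpu and service_discovery
// e2e suites currently duplicate to parse `agent status collector --json`.
package collectorstatus

import (
	"encoding/json"
	"fmt"
)

// CheckStatus mirrors one entry of RunnerStats.Checks in the collector
// status JSON, matching the struct declared in gpu_test.go.
type CheckStatus struct {
	CheckID           string `json:"CheckID"`
	CheckName         string `json:"CheckName"`
	CheckConfigSource string `json:"CheckConfigSource"`
	ExecutionTimes    []int  `json:"ExecutionTimes"`
	LastError         string `json:"LastError"`
}

type runnerStats struct {
	Checks map[string]CheckStatus `json:"Checks"`
}

type collectorStatus struct {
	RunnerStats runnerStats `json:"runnerStats"`
}

// ParseCheckStatus decodes collector status JSON and returns the status of
// the named check, or an error if the JSON is invalid or the check is absent.
func ParseCheckStatus(statusJSON []byte, checkName string) (CheckStatus, error) {
	var status collectorStatus
	if err := json.Unmarshal(statusJSON, &status); err != nil {
		return CheckStatus{}, fmt.Errorf("unmarshaling collector status: %w", err)
	}
	check, ok := status.RunnerStats.Checks[checkName]
	if !ok {
		return CheckStatus{}, fmt.Errorf("check %q not found in collector status", checkName)
	}
	return check, nil
}
```

With a helper like this, `TestGPUCheckIsEnabled` would reduce to one `ParseCheckStatus` call plus an assertion on `LastError`, and the service_discovery suite could drop its duplicate declarations.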