diff --git a/.copyright-overrides.yml b/.copyright-overrides.yml index 1ae74b390f7ca..af175233d08cd 100644 --- a/.copyright-overrides.yml +++ b/.copyright-overrides.yml @@ -131,6 +131,24 @@ github.com/google/go-containerregistry: - "Copyright 2020 Google LLC All Rights Reserved." - "Copyright 2021 Google LLC All Rights Reserved." - "Copyright 2022 Google LLC All Rights Reserved." +github.com/google/go-containerregistry/internal/compression: + - "Copyright 2018 Google LLC All Rights Reserved." + - "Copyright 2019 Google LLC All Rights Reserved." + - "Copyright 2020 Google LLC All Rights Reserved." + - "Copyright 2021 Google LLC All Rights Reserved." + - "Copyright 2022 Google LLC All Rights Reserved." +github.com/google/go-containerregistry/internal/zstd: + - "Copyright 2018 Google LLC All Rights Reserved." + - "Copyright 2019 Google LLC All Rights Reserved." + - "Copyright 2020 Google LLC All Rights Reserved." + - "Copyright 2021 Google LLC All Rights Reserved." + - "Copyright 2022 Google LLC All Rights Reserved." +github.com/google/go-containerregistry/pkg/compression: + - "Copyright 2018 Google LLC All Rights Reserved." + - "Copyright 2019 Google LLC All Rights Reserved." + - "Copyright 2020 Google LLC All Rights Reserved." + - "Copyright 2021 Google LLC All Rights Reserved." + - "Copyright 2022 Google LLC All Rights Reserved." github.com/google/licenseclassifier/v2: - "Copyright 2017 Google LLC All Rights Reserved." - "Copyright 2020 Google LLC All Rights Reserved." @@ -184,25 +202,6 @@ github.com/aquasecurity/go-version: Copyright (c) 2020 Teppei Fukuda (knqyf263) github.com/spdx/tools-golang: Copyright (c) 2018 The Authors github.com/google/flatbuffers: Copyright (c) 2014 Google -# FIXME(AP-2060): inv generate-licenses and inv lint-licenses can generate invalid csv -# while parsing some files. For instance, in the README.md of github.com/klauspost/compress/s2, -# there is an example section which contains: -# // We are only interested in the contents. 
-# // Assume that files start with "// Copyright (c) 2023". -# // Search for the longest match for that. -# // This may save a few bytes. -# dict := s2.MakeDict(insp.Content(), []byte("// Copyright (c) 2023")) -# This causes the tasks to add 'Copyright (c) 2023". | Copyright (c) 2023"))' to the copyright -# information for that file, resulting in an invalid csv (due to the quotes). -# Until this is fixed, override the copyright value to avoid parsing these files. -github.com/klauspost/compress/s2: - - "Copyright (c) 2011 The Snappy-Go Authors. All rights reserved" - - "Copyright (c) 2012 The Go Authors. All rights reserved" - - "Copyright (c) 2015 Klaus Post" - - "Copyright (c) 2019 Klaus Post. All rights reserved" - - "Copyright 2016 The filepathx Authors" - - "Copyright 2016-2017 The New York Times Company" - # The Copyright information is not contained in the LICENSE file, but it can be found in other # files in the package, such as: # * https://github.com/godror/knownpb/blob/main/timestamppb/timestamp_test.go @@ -210,3 +209,28 @@ github.com/klauspost/compress/s2: # * https://github.com/godror/knownpb/blob/main/internal/writer.go github.com/godror/knownpb/internal: Copyright 2014, 2021 Tamás Gulácsi github.com/godror/knownpb/timestamppb: Copyright 2019, 2021 Tamás Gulácsi + + +github.com/google/s2a-go: Copyright (c) 2020 Google +github.com/google/s2a-go/fallback: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/authinfo: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/handshaker: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/handshaker/service: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/proto/common_go_proto: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/proto/s2a_context_go_proto: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/proto/s2a_go_proto: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/proto/v2/common_go_proto: Copyright (c) 2020 Google 
+github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/proto/v2/s2a_go_proto: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/record: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/record/internal/aeadcrypter: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/record/internal/halfconn: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/tokenmanager: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/v2: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/v2/certverifier: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/v2/remotesigner: Copyright (c) 2020 Google +github.com/google/s2a-go/internal/v2/tlsconfigstore: Copyright (c) 2020 Google +github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1: Copyright 2018 New York University +go.opentelemetry.io/otel/semconv/internal: Copyright The OpenTelemetry Authors +go.opentelemetry.io/otel/semconv/v1.12.0: Copyright The OpenTelemetry Authors +golang.org/x/crypto/chacha20poly1305: Copyright (c) 2009 The Go Authors. 
All rights reserved diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index a5fa66bcdb52c..122d57d81377a 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -1,2 +1,2 @@ # removed old // +build go build constraints -e627edf360ef6ce2eeb58e760073bfe2e73a65a6 +5eb542ef374c942a1ca9c4fff9f2511b89a5145e diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index da240606df9b8..16394497748f0 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -116,10 +116,15 @@ /cmd/agent/subcommands/workloadlist @DataDog/container-integrations /cmd/agent/subcommands/run/internal/clcrunnerapi/ @DataDog/container-integrations @DataDog/agent-shared-components /cmd/agent/windows @DataDog/windows-agent -/cmd/agent/dist/conf.d/jetson.d @DataDog/agent-platform -/cmd/agent/dist/conf.d/oracle-dbm.d @DataDog/database-monitoring -/cmd/agent/dist/conf.d/snmp.d/ @DataDog/network-device-monitoring +/cmd/agent/dist/conf.d/container.d/ @DataDog/container-integrations +/cmd/agent/dist/conf.d/containerd.d/ @DataDog/container-integrations +/cmd/agent/dist/conf.d/container_image.d/ @DataDog/container-integrations +/cmd/agent/dist/conf.d/container_lifecycle.d/ @DataDog/container-integrations +/cmd/agent/dist/conf.d/jetson.d/ @DataDog/agent-platform +/cmd/agent/dist/conf.d/oracle-dbm.d/ @DataDog/database-monitoring /cmd/agent/dist/conf.d/oracle-dbm.d/conf.yaml.default @DataDog/database-monitoring +/cmd/agent/dist/conf.d/sbom.d/ @DataDog/container-integrations +/cmd/agent/dist/conf.d/snmp.d/ @DataDog/network-device-monitoring /cmd/agent/*.manifest @DataDog/agent-platform /cmd/agent/*.mc @DataDog/agent-platform /cmd/agent/*.rc @DataDog/agent-platform @@ -135,6 +140,9 @@ /cmd/serverless/ @DataDog/serverless /cmd/serverless-init/ @DataDog/serverless /cmd/system-probe/ @DataDog/ebpf-platform +/cmd/system-probe/config/adjust_npm.go @DataDog/ebpf-platform @DataDog/Networks +/cmd/system-probe/config/adjust_usm.go @DataDog/ebpf-platform @DataDog/universal-service-monitoring 
+/cmd/system-probe/config/adjust_security.go @DataDog/ebpf-platform @DataDog/agent-security /cmd/system-probe/modules/network_tracer* @DataDog/Networks /cmd/system-probe/modules/oom_kill_probe* @DataDog/container-integrations /cmd/system-probe/modules/process* @DataDog/processes @@ -262,6 +270,8 @@ /pkg/epforwarder/ @DataDog/agent-shared-components @DataDog/agent-metrics-logs /pkg/flare/ @DataDog/agent-shared-components /pkg/otlp/ @DataDog/opentelemetry +/pkg/otlp/*_serverless*.go @DataDog/serverless +/pkg/otlp/*_not_serverless*.go @DataDog/opentelemetry /pkg/pidfile/ @DataDog/agent-shared-components /pkg/persistentcache/ @DataDog/agent-metrics-logs /pkg/proto/ @DataDog/agent-shared-components @@ -289,6 +299,7 @@ /pkg/util/retry/ @DataDog/container-integrations /pkg/util/intern/ @DataDog/ebpf-platform /pkg/util/winutil/ @DataDog/windows-agent +/pkg/languagedetection @DataDog/processes @DataDog/universal-service-monitoring /pkg/logs/ @DataDog/agent-metrics-logs /pkg/process/ @DataDog/processes /pkg/process/util/address*.go @DataDog/Networks diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6400cde2e3cba..d5ae6d6c0a909 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -148,15 +148,15 @@ variables: DATADOG_AGENT_BUILDIMAGES_SUFFIX: "_test_only" DATADOG_AGENT_BUILDIMAGES: v15830939-045ac73 DATADOG_AGENT_WINBUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_WINBUILDIMAGES: v15555630-10b9e4c + DATADOG_AGENT_WINBUILDIMAGES: v16026304-782441d DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX: "_test_only" DATADOG_AGENT_ARMBUILDIMAGES: v15830939-045ac73 DATADOG_AGENT_SYSPROBE_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v15555630-10b9e4c + DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v16026304-782441d DATADOG_AGENT_NIKOS_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_NIKOS_BUILDIMAGES: v15555630-10b9e4c + DATADOG_AGENT_NIKOS_BUILDIMAGES: v16026304-782441d DATADOG_AGENT_BTF_GEN_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v15555630-10b9e4c + DATADOG_AGENT_BTF_GEN_BUILDIMAGES: 
v16026304-782441d DATADOG_AGENT_BUILDERS: v9930706-ef9d493 DATADOG_AGENT_EMBEDDED_PATH: /opt/datadog-agent/embedded diff --git a/.gitlab/binary_build/windows.yml b/.gitlab/binary_build/windows.yml index 63975f7d05cbf..083b9770171f3 100644 --- a/.gitlab/binary_build/windows.yml +++ b/.gitlab/binary_build/windows.yml @@ -13,7 +13,7 @@ build_windows_container_entrypoint: - if (Test-Path build-out) { remove-item -recurse -force build-out } - > docker run --rm - -m 4096M + -m 8192M -v "$(Get-Location):c:\mnt" -e CI_JOB_ID=${CI_JOB_ID} -e WINDOWS_BUILDER=true diff --git a/.gitlab/deploy_6/container.yml b/.gitlab/deploy_6/container.yml index 1ac81f7696b28..7abf39d52d3ba 100644 --- a/.gitlab/deploy_6/container.yml +++ b/.gitlab/deploy_6/container.yml @@ -15,7 +15,7 @@ dependencies: [] before_script: - source /root/.bashrc - - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv -e agent.version --major-version 6 --url-safe)"; fi + - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 6 --url-safe)"; fi - export IMG_SOURCES="${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6${JMX}-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6${JMX}-arm64" - export IMG_DESTINATIONS="${AGENT_REPOSITORY}:${VERSION}${JMX}" parallel: diff --git a/.gitlab/deploy_7/container.yml b/.gitlab/deploy_7/container.yml index 9b2d4f00b5f3c..df3db2ace79f1 100644 --- a/.gitlab/deploy_7/container.yml +++ b/.gitlab/deploy_7/container.yml @@ -15,7 +15,7 @@ dependencies: [] before_script: - source /root/.bashrc - - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv -e agent.version --major-version 7 --url-safe)"; fi + - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe)"; fi - export IMG_BASE_SRC="${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" - export IMG_LINUX_SOURCES="${IMG_BASE_SRC}-7${JMX}-amd64,${IMG_BASE_SRC}-7${JMX}-arm64" - export 
IMG_WINDOWS_SOURCES="${IMG_BASE_SRC}-7${JMX}-win1809${SERVERCORE}-amd64,${IMG_BASE_SRC}-7${JMX}-winltsc2022${SERVERCORE}-amd64" @@ -57,7 +57,7 @@ deploy_containers-dogstatsd: dependencies: [] before_script: - source /root/.bashrc - - export VERSION="$(inv -e agent.version --major-version 7 --url-safe)" + - export VERSION="$(inv agent.version --major-version 7 --url-safe)" - export IMG_SOURCES="${SRC_DSD}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64,${SRC_DSD}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-arm64" - export IMG_DESTINATIONS="${DSD_REPOSITORY}:${VERSION}" diff --git a/.gitlab/deploy_dca.yml b/.gitlab/deploy_dca.yml index 1643b67d79a63..1e4db98735f20 100644 --- a/.gitlab/deploy_dca.yml +++ b/.gitlab/deploy_dca.yml @@ -15,7 +15,7 @@ dependencies: [] before_script: - source /root/.bashrc - - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv -e agent.version --major-version 7 --url-safe)"; fi + - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe)"; fi - if [[ "$CLUSTER_AGENT_REPOSITORY" == "" ]]; then export CLUSTER_AGENT_REPOSITORY="cluster-agent"; fi - export IMG_BASE_SRC="${SRC_DCA}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" - export IMG_SOURCES="${IMG_BASE_SRC}-amd64,${IMG_BASE_SRC}-arm64" diff --git a/.gitlab/deps_build.yml b/.gitlab/deps_build.yml index 228c7b9355a89..a866a025f8f5b 100644 --- a/.gitlab/deps_build.yml +++ b/.gitlab/deps_build.yml @@ -98,7 +98,7 @@ build_vcpkg_deps: - if (Test-Path build-out) { remove-item -recurse -force build-out } - > docker run --rm - -m 4096M + -m 8192M -v "$(Get-Location):c:\mnt" -e VCPKG_BINARY_SOURCES="clear;x-azblob,${vcpkgBlobSaSUrl},readwrite" 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} diff --git a/.gitlab/e2e.yml b/.gitlab/e2e.yml index ce57d3c1032b6..aa7ec87256082 100644 --- a/.gitlab/e2e.yml +++ b/.gitlab/e2e.yml @@ -47,7 +47,7 
@@ k8s-e2e-tags-6: extends: .k8s_e2e_template rules: !reference [.on_deploy_stable_or_beta_repo_branch_a6_manual] script: - - AGENT_VERSION=$(inv -e agent.version --major-version 6) + - AGENT_VERSION=$(inv agent.version --major-version 6) - DCA_VERSION=$(inv -e cluster-agent.version) - inv -e e2e-tests --agent-image=datadog/agent:${AGENT_VERSION} --dca-image=datadog/cluster-agent:${DCA_VERSION} --argo-workflow=default @@ -55,7 +55,7 @@ k8s-e2e-tags-7: extends: .k8s_e2e_template rules: !reference [.on_deploy_stable_or_beta_repo_branch_a7_manual] script: - - AGENT_VERSION=$(inv -e agent.version --major-version 7) + - AGENT_VERSION=$(inv agent.version --major-version 7) - DCA_VERSION=$(inv -e cluster-agent.version) - inv -e e2e-tests --agent-image=datadog/agent:${AGENT_VERSION} --dca-image=datadog/cluster-agent:${DCA_VERSION} --argo-workflow=default diff --git a/.gitlab/functional_test/regression_detector.yml b/.gitlab/functional_test/regression_detector.yml index a3c0fa8b7d1d5..041f06ad69615 100644 --- a/.gitlab/functional_test/regression_detector.yml +++ b/.gitlab/functional_test/regression_detector.yml @@ -9,6 +9,7 @@ single-machine-performance-regression_detector: expire_in: 1 weeks paths: - submission_metadata + when: always variables: SMP_VERSION: 0.7.3 LADING_VERSION: 0.14.0 diff --git a/.gitlab/functional_test/security_agent.yml b/.gitlab/functional_test/security_agent.yml index 84d8f2b786314..52230b02bd397 100644 --- a/.gitlab/functional_test/security_agent.yml +++ b/.gitlab/functional_test/security_agent.yml @@ -91,7 +91,7 @@ kitchen_test_security_agent_arm64: matrix: - KITCHEN_PLATFORM: "ubuntu" KITCHEN_OSVERS: "ubuntu-20-04-2,ubuntu-22-04" - KITCHEN_CWS_PLATFORM: [host, docker] + KITCHEN_CWS_PLATFORM: [host, docker, ad] kitchen_test_security_agent_amazonlinux_x64: extends: diff --git a/.gitlab/functional_test/system_probe.yml b/.gitlab/functional_test/system_probe.yml index 06664cca7f9e4..39dedc42c5fc0 100644 --- a/.gitlab/functional_test/system_probe.yml 
+++ b/.gitlab/functional_test/system_probe.yml @@ -12,6 +12,7 @@ rules: !reference [.on_system_probe_changes_or_manual] stage: functional_test + timeout: 3h variables: AGENT_MAJOR_VERSION: 7 DD_PIPELINE_ID: $CI_PIPELINE_ID-fnct @@ -37,6 +38,21 @@ - $S3_CP_CMD $S3_ARTIFACTS_URI/minimized-btfs-${ARCH}.tar.xz /tmp/minimized-btfs.tar.xz - cp /tmp/minimized-btfs.tar.xz $DD_AGENT_TESTING_DIR/site-cookbooks/dd-system-probe-check/files/minimized-btfs.tar.xz +# This dummy job is added here because we want the functional_tests stage to start at the same time as kernel_matrix_testing stage. +# The ebpf-platform team is trying to measure the time from the start of the pipeline to the completion of the kernel_matrix_testing and functional_tests stages, to measure improvement. +# The CI visibility product currently does not provide a way to measure this in the datadog app. +# We are trying to hack around this by having jobs which are triggered at the start of a pipeline. +# For functional tests this is the 'kitchen_test_dummy_job_tmp' and for kernel_matrix_testing it is the 'pull_test_dockers*' jobs. +# This way the stage.duration metric will allow us to measure what we want. +# This will most likely be temporary, until we decide which approach to move forward with for testing in the future. 
+kitchen_test_dummy_job_tmp: + stage: functional_test + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/btf-gen$DATADOG_AGENT_BTF_GEN_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BTF_GEN_BUILDIMAGES + needs: [] + tags: ["runner:main"] + script: + - 'true' + kitchen_test_system_probe_linux_x64_ec2: extends: - .kitchen_test_system_probe @@ -58,24 +74,50 @@ kitchen_test_system_probe_linux_x64_ec2: parallel: matrix: - KITCHEN_PLATFORM: "amazonlinux" - KITCHEN_OSVERS: "amazonlinux2-4-14,amazonlinux2-5-10,amazonlinux2022-5-15" + KITCHEN_OSVERS: "amazonlinux2-4-14" + - KITCHEN_PLATFORM: "amazonlinux" + KITCHEN_OSVERS: "amazonlinux2-5-10" + - KITCHEN_PLATFORM: "amazonlinux" + KITCHEN_OSVERS: "amazonlinux2022-5-15" + - KITCHEN_PLATFORM: "amazonlinux" + KITCHEN_OSVERS: "amazonlinux2023" + - KITCHEN_PLATFORM: "centos" + KITCHEN_OSVERS: "centos-79" + KITCHEN_EC2_DEVICE_NAME: "/dev/sda1" - KITCHEN_PLATFORM: "centos" - KITCHEN_OSVERS: "centos-79,rhel-86" + KITCHEN_OSVERS: "rhel-86" + KITCHEN_EC2_DEVICE_NAME: "/dev/sda1" + - KITCHEN_PLATFORM: "debian" + KITCHEN_OSVERS: "debian-10" + - KITCHEN_PLATFORM: "debian" + KITCHEN_OSVERS: "debian-11" + - KITCHEN_PLATFORM: "fedora" + KITCHEN_OSVERS: "fedora-36" + KITCHEN_EC2_DEVICE_NAME: "/dev/sda1" + - KITCHEN_PLATFORM: "fedora" + KITCHEN_OSVERS: "fedora-37" KITCHEN_EC2_DEVICE_NAME: "/dev/sda1" - KITCHEN_PLATFORM: "ubuntu" KITCHEN_OSVERS: "ubuntu-16-04-4.4" KITCHEN_EC2_DEVICE_NAME: "/dev/sda1" - KITCHEN_PLATFORM: "ubuntu" - KITCHEN_OSVERS: "ubuntu-16-04,ubuntu-18-04,ubuntu-20-04,ubuntu-22-04" + KITCHEN_OSVERS: "ubuntu-16-04" + KITCHEN_EC2_DEVICE_NAME: "/dev/sda1" + - KITCHEN_PLATFORM: "ubuntu" + KITCHEN_OSVERS: "ubuntu-18-04" + KITCHEN_EC2_DEVICE_NAME: "/dev/sda1" + - KITCHEN_PLATFORM: "ubuntu" + KITCHEN_OSVERS: "ubuntu-20-04" + KITCHEN_EC2_DEVICE_NAME: "/dev/sda1" + - KITCHEN_PLATFORM: "ubuntu" + KITCHEN_OSVERS: "ubuntu-22-04" KITCHEN_EC2_DEVICE_NAME: "/dev/sda1" - - KITCHEN_PLATFORM: "debian" - KITCHEN_OSVERS: 
"debian-10,debian-11" kitchen_test_system_probe_linux_arm64: extends: - .kitchen_test_system_probe - .kitchen_ec2_location_us_east_1 - - .kitchen_ec2_spot_instances + - .kitchen_ec2 needs: [ "tests_ebpf_arm64", "prepare_ebpf_functional_tests_arm64", "generate_minimized_btfs_arm64", "pull_test_dockers_arm64" ] variables: ARCH: arm64 @@ -91,16 +133,38 @@ kitchen_test_system_probe_linux_arm64: - tasks/kitchen_setup.sh parallel: matrix: - - KITCHEN_PLATFORM: "ubuntu" - KITCHEN_OSVERS: "ubuntu-18-04,ubuntu-20-04,ubuntu-22-04" - - KITCHEN_PLATFORM: "debian" - KITCHEN_OSVERS: "debian-10,debian-11" + - KITCHEN_PLATFORM: "amazonlinux" + KITCHEN_OSVERS: "amazonlinux2-4-14" + KITCHEN_EC2_DEVICE_NAME: "/dev/xvda" + - KITCHEN_PLATFORM: "amazonlinux" + KITCHEN_OSVERS: "amazonlinux2-5-10" + KITCHEN_EC2_DEVICE_NAME: "/dev/xvda" + - KITCHEN_PLATFORM: "amazonlinux" + KITCHEN_OSVERS: "amazonlinux2022-5-15" KITCHEN_EC2_DEVICE_NAME: "/dev/xvda" - - KITCHEN_PLATFORM: "centos" - KITCHEN_OSVERS: "centos-79,rhel-86" - KITCHEN_PLATFORM: "amazonlinux" - KITCHEN_OSVERS: "amazonlinux2-4-14,amazonlinux2-5-10,amazonlinux2022-5-15" + KITCHEN_OSVERS: "amazonlinux2023" KITCHEN_EC2_DEVICE_NAME: "/dev/xvda" + - KITCHEN_PLATFORM: "centos" + KITCHEN_OSVERS: "centos-79" + - KITCHEN_PLATFORM: "centos" + KITCHEN_OSVERS: "rhel-86" + - KITCHEN_PLATFORM: "debian" + KITCHEN_OSVERS: "debian-10" + KITCHEN_EC2_DEVICE_NAME: "/dev/xvda" + - KITCHEN_PLATFORM: "debian" + KITCHEN_OSVERS: "debian-11" + KITCHEN_EC2_DEVICE_NAME: "/dev/xvda" + - KITCHEN_PLATFORM: "fedora" + KITCHEN_OSVERS: "fedora-36" + - KITCHEN_PLATFORM: "fedora" + KITCHEN_OSVERS: "fedora-37" + - KITCHEN_PLATFORM: "ubuntu" + KITCHEN_OSVERS: "ubuntu-18-04" + - KITCHEN_PLATFORM: "ubuntu" + KITCHEN_OSVERS: "ubuntu-20-04" + - KITCHEN_PLATFORM: "ubuntu" + KITCHEN_OSVERS: "ubuntu-22-04" kitchen_test_system_probe_windows_x64: extends: diff --git a/.gitlab/kitchen_cleanup.yml b/.gitlab/kitchen_cleanup.yml index e62ba38c00694..e83a4729492aa 100644 --- 
a/.gitlab/kitchen_cleanup.yml +++ b/.gitlab/kitchen_cleanup.yml @@ -8,24 +8,6 @@ # include: # - /.gitlab/kitchen_common/cleanup.yml -kitchen_cleanup_s3-a6: - extends: .kitchen_cleanup_s3_common - rules: - !reference [.on_kitchen_tests_a6] - dependencies: ["agent_deb-x64-a6", "agent_heroku_deb-x64-a6"] - variables: - AGENT_MAJOR_VERSION: 6 - DD_PIPELINE_ID: $CI_PIPELINE_ID-a6 - -kitchen_cleanup_s3-a7: - extends: .kitchen_cleanup_s3_common - rules: - !reference [.on_default_kitchen_tests_a7] - dependencies: ["agent_deb-x64-a7", "agent_heroku_deb-x64-a7"] - variables: - AGENT_MAJOR_VERSION: 7 - DD_PIPELINE_ID: $CI_PIPELINE_ID-a7 - kitchen_cleanup_azure-a6: extends: .kitchen_cleanup_azure_common rules: diff --git a/.gitlab/kitchen_testing/windows.yml b/.gitlab/kitchen_testing/windows.yml index f3d92624b9aa0..7d8529bf52f2b 100644 --- a/.gitlab/kitchen_testing/windows.yml +++ b/.gitlab/kitchen_testing/windows.yml @@ -29,7 +29,7 @@ - .kitchen_azure_x64 variables: KITCHEN_PLATFORM: "windows" - KITCHEN_OSVERS: "win2008r2,win2012,win2012r2" + KITCHEN_OSVERS: "win2012,win2012r2" before_script: # Note: if you are changing this, remember to also change .kitchen_test_windows_installer, which has a copy of this with less TEST_PLATFORMS defined. 
- if [ $AGENT_MAJOR_VERSION == "7" ]; then export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A7; else export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A6; fi - cd $DD_AGENT_TESTING_DIR diff --git a/.gitlab/package_build/windows.yml b/.gitlab/package_build/windows.yml index a4eb2e0a4fcd5..1a0434c9b7130 100644 --- a/.gitlab/package_build/windows.yml +++ b/.gitlab/package_build/windows.yml @@ -13,7 +13,7 @@ - !reference [.setup_python_mirror_win] - > docker run --rm - -m 4096M + -m 8192M -v "$(Get-Location):c:\mnt" -e CI_JOB_ID=${CI_JOB_ID} -e CI_PIPELINE_ID=${CI_PIPELINE_ID} @@ -100,7 +100,7 @@ windows_zip_agent_binaries_x64-a7: - !reference [.setup_python_mirror_win] - > docker run --rm - -m 4096M + -m 8192M -v "$(Get-Location):c:\mnt" -e CI_COMMIT_BRANCH=${CI_COMMIT_BRANCH} -e OMNIBUS_TARGET=${OMNIBUS_TARGET} diff --git a/.gitlab/source_test/macos.yml b/.gitlab/source_test/macos.yml index 0e68692439d9a..45d2d6814bc84 100644 --- a/.gitlab/source_test/macos.yml +++ b/.gitlab/source_test/macos.yml @@ -16,6 +16,7 @@ tests_macos: - !reference [.setup_python_mirror_linux] - python3 -m pip install -r tasks/libs/requirements-github.txt - inv -e github.trigger-macos-test --datadog-agent-ref "$CI_COMMIT_SHA" --python-runtimes "$PYTHON_RUNTIMES" + after_script: - inv -e junit-macos-repack --infile junit-tests_macos.tgz --outfile junit-tests_macos-repacked.tgz artifacts: expire_in: 2 weeks diff --git a/.gitlab/source_test/windows.yml b/.gitlab/source_test/windows.yml index 02546abda29da..fb2aaae6f4352 100644 --- a/.gitlab/source_test/windows.yml +++ b/.gitlab/source_test/windows.yml @@ -13,7 +13,7 @@ - !reference [.setup_python_mirror_win] - > docker run --rm - -m 8192M + -m 16384M -v "$(Get-Location):c:\mnt" -e CI_JOB_URL="${CI_JOB_URL}" -e CI_JOB_NAME="${CI_JOB_NAME}" diff --git a/CHANGELOG.rst b/CHANGELOG.rst index fb5b6dbcba8fc..d10c8af360216 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,38 @@ Release Notes ============= +.. 
_Release Notes_7.44.1: + +7.44.1 / 6.44.1 +====== + +.. _Release Notes_7.44.1_Prelude: + +Prelude +------- + +Release on: 2023-05-16 + + +.. _Release Notes_7.44.1_Enhancement Notes: + +Enhancement Notes +----------------- + +- Agents are now built with Go ``1.19.8``. + +- Added optional config flag `process_config.cache_lookupid` to cache calls to `user.LookupId` in the process Agent. + Use to minimize the number of calls to `user.LookupId` and avoid potential leak. + + +.. _Release Notes_7.44.1_Bug Fixes: + +Bug Fixes +--------- + +- Fixes the inclusion of the ``security-agent.yaml`` file in the flare. + + .. _Release Notes_7.44.0: 7.44.0 / 6.44.0 diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 4eb9c8ebe4ef7..27ca099576513 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -22,7 +22,6 @@ core,code.cloudfoundry.org/garden/client,Apache-2.0,"Copyright (c) 2016-Present core,code.cloudfoundry.org/garden/client/connection,Apache-2.0,"Copyright (c) 2016-Present CloudFoundry.org Foundation, Inc. All Rights Reserved." core,code.cloudfoundry.org/garden/routes,Apache-2.0,"Copyright (c) 2016-Present CloudFoundry.org Foundation, Inc. All Rights Reserved." core,code.cloudfoundry.org/garden/transport,Apache-2.0,"Copyright (c) 2016-Present CloudFoundry.org Foundation, Inc. All Rights Reserved." -core,code.cloudfoundry.org/gofileutils/fileutils,Apache-2.0,"Copyright (c) 2016-Present CloudFoundry.org Foundation, Inc. All Rights Reserved." core,code.cloudfoundry.org/lager,Apache-2.0,"Copyright (c) 2016-Present CloudFoundry.org Foundation, Inc. All Rights Reserved." core,code.cloudfoundry.org/tlsconfig,Apache-2.0,"Copyright (c) 2016-Present CloudFoundry.org Foundation, Inc. All Rights Reserved." 
core,contrib.go.opencensus.io/exporter/prometheus,Apache-2.0,"Copyright 2020, OpenCensus Authors" @@ -82,14 +81,6 @@ core,github.com/DataDog/go-tuf/pkg/keys,BSD-3-Clause,"Copyright (c) 2014-2020 Pr core,github.com/DataDog/go-tuf/pkg/targets,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved" core,github.com/DataDog/go-tuf/util,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved" core,github.com/DataDog/go-tuf/verify,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved" -core,github.com/DataDog/gohai/cpu,MIT,"Copyright (c) 2014-2015, Datadog | Copyright © 2015 Kentaro Kuribayashi " -core,github.com/DataDog/gohai/filesystem,MIT,"Copyright (c) 2014-2015, Datadog | Copyright © 2015 Kentaro Kuribayashi " -core,github.com/DataDog/gohai/memory,MIT,"Copyright (c) 2014-2015, Datadog | Copyright © 2015 Kentaro Kuribayashi " -core,github.com/DataDog/gohai/network,MIT,"Copyright (c) 2014-2015, Datadog | Copyright © 2015 Kentaro Kuribayashi " -core,github.com/DataDog/gohai/platform,MIT,"Copyright (c) 2014-2015, Datadog | Copyright © 2015 Kentaro Kuribayashi " -core,github.com/DataDog/gohai/processes,MIT,"Copyright (c) 2014-2015, Datadog | Copyright © 2015 Kentaro Kuribayashi " -core,github.com/DataDog/gohai/processes/gops,MIT,"Copyright (c) 2014-2015, Datadog | Copyright © 2015 Kentaro Kuribayashi " -core,github.com/DataDog/gohai/utils,MIT,"Copyright (c) 2014-2015, Datadog | Copyright © 2015 Kentaro Kuribayashi " core,github.com/DataDog/gopsutil/cpu,BSD-3-Clause,"Copyright (c) 2009 The Go Authors. All rights reserved | Copyright (c) 2014, WAKAYAMA Shirou" core,github.com/DataDog/gopsutil/host,BSD-3-Clause,"Copyright (c) 2009 The Go Authors. All rights reserved | Copyright (c) 2014, WAKAYAMA Shirou" core,github.com/DataDog/gopsutil/internal/common,BSD-3-Clause,"Copyright (c) 2009 The Go Authors. 
All rights reserved | Copyright (c) 2014, WAKAYAMA Shirou" @@ -143,7 +134,9 @@ core,github.com/Masterminds/semver/v3,MIT,"Copyright (C) 2014-2019, Matt Butcher core,github.com/Masterminds/sprig/v3,MIT,Copyright (C) 2013-2020 Masterminds core,github.com/Microsoft/go-winio,MIT,Copyright (c) 2015 Microsoft core,github.com/Microsoft/go-winio/backuptar,MIT,Copyright (c) 2015 Microsoft +core,github.com/Microsoft/go-winio/internal/fs,MIT,Copyright (c) 2015 Microsoft core,github.com/Microsoft/go-winio/internal/socket,MIT,Copyright (c) 2015 Microsoft +core,github.com/Microsoft/go-winio/internal/stringbuffer,MIT,Copyright (c) 2015 Microsoft core,github.com/Microsoft/go-winio/pkg/guid,MIT,Copyright (c) 2015 Microsoft core,github.com/Microsoft/go-winio/pkg/security,MIT,Copyright (c) 2015 Microsoft core,github.com/Microsoft/go-winio/vhd,MIT,Copyright (c) 2015 Microsoft @@ -562,7 +555,8 @@ core,github.com/cloudflare/circl/math/mlsbset,BSD-3-Clause,Copyright (c) 2009 Th core,github.com/cloudflare/circl/sign,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved | Copyright (c) 2019 Cloudflare. All rights reserved core,github.com/cloudflare/circl/sign/ed25519,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved | Copyright (c) 2019 Cloudflare. All rights reserved core,github.com/cloudflare/circl/sign/ed448,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved | Copyright (c) 2019 Cloudflare. All rights reserved -core,github.com/cloudfoundry-community/go-cfclient,MIT,Copyright (c) 2017 Long Nguyen +core,github.com/cloudfoundry-community/go-cfclient/v2,MIT,Copyright (c) 2017 Long Nguyen +core,github.com/containerd/cgroups,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/cgroups/stats/v1,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/cgroups/v2/stats,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd,Apache-2.0,"Copyright 2012-2015 Docker, Inc." 
@@ -647,6 +641,7 @@ core,github.com/coreos/go-semver/semver,Apache-2.0,"Copyright 2017 CoreOS, Inc" core,github.com/coreos/go-systemd/dbus,Apache-2.0,"Copyright 2017 CoreOS, Inc" core,github.com/coreos/go-systemd/sdjournal,Apache-2.0,"Copyright 2017 CoreOS, Inc" core,github.com/coreos/go-systemd/v22/daemon,Apache-2.0,"Copyright 2017 CoreOS, Inc" +core,github.com/coreos/go-systemd/v22/dbus,Apache-2.0,"Copyright 2017 CoreOS, Inc" core,github.com/coreos/go-systemd/v22/journal,Apache-2.0,"Copyright 2017 CoreOS, Inc" core,github.com/coreos/pkg/dlopen,Apache-2.0,"Copyright 2017 CoreOS, Inc" core,github.com/cri-o/ocicni/pkg/ocicni,Apache-2.0,"Copyright 2016 Red Hat, Inc" @@ -832,6 +827,7 @@ core,github.com/gobwas/glob/syntax/lexer,MIT,Copyright (c) 2016 Sergey Kamardin core,github.com/gobwas/glob/util/runes,MIT,Copyright (c) 2016 Sergey Kamardin core,github.com/gobwas/glob/util/strings,MIT,Copyright (c) 2016 Sergey Kamardin core,github.com/godbus/dbus,BSD-2-Clause,"Copyright (c) 2013, Georg Reinke (), Google" +core,github.com/godbus/dbus/v5,BSD-2-Clause,"Copyright (c) 2013, Georg Reinke (), Google" core,github.com/godror/godror,Apache-2.0,"Alberto Andria | Andy Garfield | Anthony Tuininga | Cemre MENGU | Chris Duncan | Christopher Jones | Copyright (c) 2009 The Go Authors. All rights reserved | Copyright (c) 2015 go-logfmt | Copyright (c) 2016, 2018 Oracle and/or its affiliates. All rights reserved | Copyright (c) 2017 The Go Authors. 
All rights reserved | Copyright 2017, 2020 Tamás Gulácsi | David Garcia | Gerasimos (Makis) Maropoulos | Harris | Ivan Markin | Ivan Pedersen | Kurt K <44790488+kurt-google@users.noreply.github.com> | Michael Kenney | Mislav Kasner | Noval Agung Prayogo | Ricardo Vegas | Robin van Duiven | Ryan Bastic | Saimon | Sudarshan Soma | Tamás Gulácsi | Tomoya AMACHI | Walter Alves Wanderley | ricardojesus.vegas | ubinix-warun | zhuqiuzhi " core,github.com/godror/godror/dsn,Apache-2.0,"Alberto Andria | Andy Garfield | Anthony Tuininga | Cemre MENGU | Chris Duncan | Christopher Jones | Copyright (c) 2009 The Go Authors. All rights reserved | Copyright (c) 2015 go-logfmt | Copyright (c) 2016, 2018 Oracle and/or its affiliates. All rights reserved | Copyright (c) 2017 The Go Authors. All rights reserved | Copyright 2017, 2020 Tamás Gulácsi | David Garcia | Gerasimos (Makis) Maropoulos | Harris | Ivan Markin | Ivan Pedersen | Kurt K <44790488+kurt-google@users.noreply.github.com> | Michael Kenney | Mislav Kasner | Noval Agung Prayogo | Ricardo Vegas | Robin van Duiven | Ryan Bastic | Saimon | Sudarshan Soma | Tamás Gulácsi | Tomoya AMACHI | Walter Alves Wanderley | ricardojesus.vegas | ubinix-warun | zhuqiuzhi " core,github.com/godror/godror/odpi/embed,Apache-2.0,"Alberto Andria | Andy Garfield | Anthony Tuininga | Cemre MENGU | Chris Duncan | Christopher Jones | Copyright (c) 2009 The Go Authors. All rights reserved | Copyright (c) 2015 go-logfmt | Copyright (c) 2016, 2018 Oracle and/or its affiliates. All rights reserved | Copyright (c) 2016, 2022 Oracle and/or its affiliates | Copyright (c) 2017 The Go Authors. 
All rights reserved | Copyright 2017, 2020 Tamás Gulácsi | David Garcia | Gerasimos (Makis) Maropoulos | Harris | Ivan Markin | Ivan Pedersen | Kurt K <44790488+kurt-google@users.noreply.github.com> | Michael Kenney | Mislav Kasner | Noval Agung Prayogo | Ricardo Vegas | Robin van Duiven | Ryan Bastic | Saimon | Sudarshan Soma | Tamás Gulácsi | Tomoya AMACHI | Walter Alves Wanderley | ricardojesus.vegas | ubinix-warun | zhuqiuzhi " @@ -848,6 +844,8 @@ core,github.com/gogo/protobuf/sortkeys,BSD-3-Clause,"Copyright (c) 2013, The GoG core,github.com/gogo/protobuf/types,BSD-3-Clause,"Copyright (c) 2013, The GoGo Authors. All rights reserved. | Copyright 2010 The Go Authors. All rights reserved." core,github.com/golang-jwt/jwt/v4,MIT,Copyright (c) 2012 Dave Grijalva | Copyright (c) 2021 golang-jwt maintainers core,github.com/golang/glog,Apache-2.0,Copyright (c) 2009 The Go Authors. All rights reserved. +core,github.com/golang/glog/internal/logsink,Apache-2.0,Copyright (c) 2009 The Go Authors. All rights reserved. +core,github.com/golang/glog/internal/stackdump,Apache-2.0,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/groupcache/lru,Apache-2.0,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/mock/gomock,Apache-2.0,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/protobuf/descriptor,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. @@ -860,7 +858,6 @@ core,github.com/golang/protobuf/protoc-gen-go/plugin,BSD-3-Clause,Copyright (c) core,github.com/golang/protobuf/ptypes,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/protobuf/ptypes/any,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/protobuf/ptypes/duration,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. -core,github.com/golang/protobuf/ptypes/empty,BSD-3-Clause,Copyright (c) 2009 The Go Authors. 
All rights reserved. core,github.com/golang/protobuf/ptypes/struct,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/protobuf/ptypes/timestamp,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/protobuf/ptypes/wrappers,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. @@ -871,13 +868,16 @@ core,github.com/google/go-cmp/cmp/internal/flags,BSD-3-Clause,Copyright (c) 2017 core,github.com/google/go-cmp/cmp/internal/function,BSD-3-Clause,Copyright (c) 2017 The Go Authors. All rights reserved. core,github.com/google/go-cmp/cmp/internal/value,BSD-3-Clause,Copyright (c) 2017 The Go Authors. All rights reserved. core,github.com/google/go-containerregistry/internal/and,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. +core,github.com/google/go-containerregistry/internal/compression,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/internal/estargz,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/internal/gzip,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. 
core,github.com/google/go-containerregistry/internal/redact,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/internal/retry,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/internal/retry/wait,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/internal/verify,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. +core,github.com/google/go-containerregistry/internal/zstd,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/authn,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. +core,github.com/google/go-containerregistry/pkg/compression,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. 
| Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/logs,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/name,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/v1,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. @@ -914,6 +914,25 @@ core,github.com/google/pprof/profile,Apache-2.0,Andrew Hunter | Copyright 2010-2017 Mike Bostock | Google Inc. | Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley core,github.com/google/pprof/third_party/d3flamegraph,Apache-2.0,Andrew Hunter | Google Inc. | Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley core,github.com/google/pprof/third_party/svgpan,BSD-3-Clause,Andrew Hunter | Copyright 2009-2017 Andrea Leofreddi . All rights reserved | Google Inc. 
| Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley +core,github.com/google/s2a-go,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/fallback,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/authinfo,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/handshaker,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/handshaker/service,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/proto/common_go_proto,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/proto/s2a_context_go_proto,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/proto/s2a_go_proto,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/proto/v2/common_go_proto,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/proto/v2/s2a_go_proto,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/record,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/record/internal/aeadcrypter,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/record/internal/halfconn,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/tokenmanager,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/v2,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/v2/certverifier,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/v2/remotesigner,Apache-2.0,Copyright (c) 2020 Google +core,github.com/google/s2a-go/internal/v2/tlsconfigstore,Apache-2.0,Copyright (c) 2020 Google core,github.com/google/uuid,BSD-3-Clause,"Copyright (c) 2009,2014 Google Inc. 
All rights reserved | Paul Borman | bmatsuo | cd1 | dansouza | dsymonds | jboverfelt | shawnps | theory | wallclockbuilder" core,github.com/google/wire,Apache-2.0,Chris Lewis | Christina Austin <4240737+clausti@users.noreply.github.com> | Eno Compton | Google LLC | Issac Trotts | Kumbirai Tanekha | Oleg Kovalov | Robert van Gent | Ross Light | Tuo Shan | Yoichiro Shimizu | Zachary Romero | ktr core,github.com/googleapis/enterprise-certificate-proxy/client,Apache-2.0,Copyright 2017 Google Inc. @@ -988,6 +1007,7 @@ core,github.com/in-toto/in-toto-golang/in_toto,Apache-2.0,Copyright 2018 New Yor core,github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common,Apache-2.0,Copyright 2018 New York University core,github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1,Apache-2.0,Copyright 2018 New York University core,github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2,Apache-2.0,Copyright 2018 New York University +core,github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1,Apache-2.0,Copyright 2018 New York University core,github.com/inconshreveable/mousetrap,Apache-2.0,Copyright 2022 Alan Shreve (@inconshreveable) core,github.com/invopop/jsonschema,MIT,Copyright (C) 2014 Alec Thomas core,github.com/iovisor/gobpf/pkg/cpupossible,Apache-2.0,Copyright 2016 Kinvolk | Copyright 2016 PLUMgrid @@ -1114,11 +1134,13 @@ core,github.com/open-policy-agent/opa/ast/internal/tokens,Apache-2.0,Copyright 2 core,github.com/open-policy-agent/opa/ast/location,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/bundle,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/capabilities,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. +core,github.com/open-policy-agent/opa/config,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/format,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. 
core,github.com/open-policy-agent/opa/internal/bundle,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/internal/cidr/merge,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/internal/compiler/wasm,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/internal/compiler/wasm/opa,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. +core,github.com/open-policy-agent/opa/internal/config,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/internal/debug,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/internal/deepcopy,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/internal/edittree,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. @@ -1149,8 +1171,10 @@ core,github.com/open-policy-agent/opa/internal/providers/aws/crypto,Apache-2.0,C core,github.com/open-policy-agent/opa/internal/providers/aws/v4,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/internal/ref,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/internal/rego/opa,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. +core,github.com/open-policy-agent/opa/internal/runtime/init,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/internal/semver,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/internal/strings,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. +core,github.com/open-policy-agent/opa/internal/strvals,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. 
core,github.com/open-policy-agent/opa/internal/uuid,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/internal/version,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/internal/wasm/constant,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. @@ -1164,8 +1188,12 @@ core,github.com/open-policy-agent/opa/internal/wasm/util,Apache-2.0,Copyright 20 core,github.com/open-policy-agent/opa/ir,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/keys,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/loader,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. +core,github.com/open-policy-agent/opa/loader/extension,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/loader/filter,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. +core,github.com/open-policy-agent/opa/logging,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/metrics,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. +core,github.com/open-policy-agent/opa/plugins,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. +core,github.com/open-policy-agent/opa/plugins/rest,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/rego,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/resolver,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. core,github.com/open-policy-agent/opa/resolver/wasm,Apache-2.0,Copyright 2016 The OPA Authors. All rights reserved. 
@@ -1525,13 +1553,10 @@ core,go.opentelemetry.io/collector/pdata/internal/json,Apache-2.0,Copyright The core,go.opentelemetry.io/collector/pdata/internal/otlp,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/pdata/pcommon,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/pdata/plog,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/pdata/plog/internal/plogjson,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/pdata/plog/plogotlp,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/pdata/pmetric,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/pdata/pmetric/internal/pmetricjson,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/pdata/ptrace,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/pdata/ptrace/internal/ptracejson,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/processor,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/processor/batchprocessor,Apache-2.0,Copyright The OpenTelemetry Authors @@ -1587,7 +1612,9 @@ core,go.opentelemetry.io/otel/sdk/metric/internal,Apache-2.0,Copyright The OpenT core,go.opentelemetry.io/otel/sdk/metric/metricdata,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/otel/sdk/resource,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/otel/sdk/trace,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/otel/semconv/internal,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/otel/semconv/internal/v2,Apache-2.0,Copyright The 
OpenTelemetry Authors +core,go.opentelemetry.io/otel/semconv/v1.12.0,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/otel/semconv/v1.17.0,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/otel/semconv/v1.17.0/httpconv,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/otel/trace,Apache-2.0,Copyright The OpenTelemetry Authors @@ -1632,6 +1659,7 @@ core,golang.org/x/crypto/blake2b,BSD-3-Clause,Copyright (c) 2009 The Go Authors. core,golang.org/x/crypto/blowfish,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved core,golang.org/x/crypto/cast5,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved core,golang.org/x/crypto/chacha20,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved +core,golang.org/x/crypto/chacha20poly1305,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved core,golang.org/x/crypto/cryptobyte,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved core,golang.org/x/crypto/cryptobyte/asn1,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved core,golang.org/x/crypto/curve25519,BSD-3-Clause,Copyright (c) 2009 The Go Authors. 
All rights reserved diff --git a/cmd/agent/common/path/path_darwin.go b/cmd/agent/common/path/path_darwin.go index 338ff7d2aca70..9bcc40a5775b7 100644 --- a/cmd/agent/common/path/path_darwin.go +++ b/cmd/agent/common/path/path_darwin.go @@ -24,6 +24,8 @@ const ( DefaultCheckFlareDirectory = "/opt/datadog-agent/logs/checks/" // DefaultJMXFlareDirectory a flare friendly location for jmx command logs to be written DefaultJMXFlareDirectory = "/opt/datadog-agent/logs/jmxinfo/" + //DefaultDogstatsDLogFile points to the dogstatsd stats log file that will be used if not configured + DefaultDogstatsDLogFile = "/opt/datadog-agent/logs/dogstatsd_info/dogstatsd-stats.log" ) var ( diff --git a/cmd/agent/common/path/path_freebsd.go b/cmd/agent/common/path/path_freebsd.go index 0aebd0d55e35a..05746ebe2a36b 100644 --- a/cmd/agent/common/path/path_freebsd.go +++ b/cmd/agent/common/path/path_freebsd.go @@ -24,6 +24,8 @@ const ( DefaultCheckFlareDirectory = "/var/log/datadog/checks/" // DefaultJMXFlareDirectory a flare friendly location for jmx command logs to be written DefaultJMXFlareDirectory = "/var/log/datadog/jmxinfo/" + //DefaultDogstatsDLogFile points to the dogstatsd stats log file that will be used if not configured + DefaultDogstatsDLogFile = "/var/log/datadog/dogstatsd_info/dogstatsd-stats.log" ) var ( diff --git a/cmd/agent/common/path/path_nix.go b/cmd/agent/common/path/path_nix.go index be4b4d212e345..cab7d2a781b8f 100644 --- a/cmd/agent/common/path/path_nix.go +++ b/cmd/agent/common/path/path_nix.go @@ -26,6 +26,8 @@ const ( DefaultCheckFlareDirectory = "/var/log/datadog/checks/" // DefaultJMXFlareDirectory a flare friendly location for jmx command logs to be written DefaultJMXFlareDirectory = "/var/log/datadog/jmxinfo/" + //DefaultDogstatsDLogFile points to the dogstatsd stats log file that will be used if not configured + DefaultDogstatsDLogFile = "/var/log/datadog/dogstatsd_info/dogstatsd-stats.log" ) var ( diff --git a/cmd/agent/common/path/path_windows.go 
b/cmd/agent/common/path/path_windows.go index b15ee9e826781..a5d98f3bc785f 100644 --- a/cmd/agent/common/path/path_windows.go +++ b/cmd/agent/common/path/path_windows.go @@ -38,6 +38,8 @@ var ( DefaultCheckFlareDirectory = "c:\\programdata\\datadog\\logs\\checks\\" // DefaultJMXFlareDirectory a flare friendly location for jmx command logs to be written DefaultJMXFlareDirectory = "c:\\programdata\\datadog\\logs\\jmxinfo\\" + //DefaultDogstatsDLogFile points to the dogstatsd stats log file that will be used if not configured + DefaultDogstatsDLogFile = "c:\\programdata\\datadog\\logs\\dogstatsd_info\\dogstatsd-stats.log" ) func init() { @@ -46,6 +48,7 @@ func init() { DefaultConfPath = pd DefaultLogFile = filepath.Join(pd, "logs", "agent.log") DefaultDCALogFile = filepath.Join(pd, "logs", "cluster-agent.log") + DefaultDogstatsDLogFile = filepath.Join(pd, "logs", "dogstatsd_info", "dogstatsd-stats.log") } } diff --git a/cmd/agent/dist/conf.d/container_image.d/conf.yaml.default b/cmd/agent/dist/conf.d/container_image.d/conf.yaml.default new file mode 100644 index 0000000000000..1e58dbbaafbbd --- /dev/null +++ b/cmd/agent/dist/conf.d/container_image.d/conf.yaml.default @@ -0,0 +1,5 @@ +ad_identifiers: + - _container_image +init_config: +instances: + - diff --git a/cmd/agent/dist/conf.d/container_lifecycle.d/conf.yaml.default b/cmd/agent/dist/conf.d/container_lifecycle.d/conf.yaml.default new file mode 100644 index 0000000000000..4e60c87b2e456 --- /dev/null +++ b/cmd/agent/dist/conf.d/container_lifecycle.d/conf.yaml.default @@ -0,0 +1,5 @@ +ad_identifiers: + - _container_lifecycle +init_config: +instances: + - diff --git a/cmd/agent/dist/conf.d/openmetrics.d/agent_internal.yaml b/cmd/agent/dist/conf.d/openmetrics.d/agent_internal.yaml new file mode 100644 index 0000000000000..6720e60036616 --- /dev/null +++ b/cmd/agent/dist/conf.d/openmetrics.d/agent_internal.yaml @@ -0,0 +1,10 @@ +instances: + - + openmetrics_endpoint: http://localhost:5000/telemetry + namespace: 
datadog.agent + no_index: true + metrics: + - logs_sender_latency.* + - logs_sent.* + - logs_dropped.* + - payload_drops.* diff --git a/cmd/agent/dist/conf.d/oracle-dbm.d/conf.yaml.example b/cmd/agent/dist/conf.d/oracle-dbm.d/conf.yaml.example index 7ce2c9ad2a046..c22d9bd7ecd41 100644 --- a/cmd/agent/dist/conf.d/oracle-dbm.d/conf.yaml.example +++ b/cmd/agent/dist/conf.d/oracle-dbm.d/conf.yaml.example @@ -15,7 +15,6 @@ instances: ## If you use Oracle names resolution via tnsnamas.ora or ldap.ora, ## you must specify 'tns_alias' instead of 'server' and 'service_name'. # - - server: or server: : ## @param port - string - optional @@ -65,6 +64,33 @@ instances: ## @param reported_hostname - string - optional ## Set the reported hostname for this instance. This value overrides the hostname detected by the Agent + ## Configure collection of database sysmetrics + # + # sysmetrics: + + ## @param enabled - boolean - optional - default: true + ## Enable collection of database sysmetrics + # + # enabled: true + + ## Configure collection of tablespace usage + # + # tablespaces: + + ## @param enabled - boolean - optional - default: true + ## Enable collection of tablespace usage + # + # enabled: true + + ## Configure collection of process memory usage + # + # processes: + + ## @param enabled - boolean - optional - default: true + ## Enable collection of process memory usage + # + # enabled: true + ## @param dbm - boolean - optional - default: false ## Set to `true` to enable Database Monitoring. # @@ -91,6 +117,15 @@ instances: ## Configure query metrics collection for Datadog agent statements # # include_datadog_queries: false + + ## Configure collection of execution plans + # + # execution_plans: + + ## @param enabled - boolean - optional - default: false + ## Enable collection of execution plans. Requires query metrics. + # + # enabled: false ## Configure how the SQL obfuscator behaves. ## Note: This option only applies when `dbm` is enabled. 
@@ -137,3 +172,28 @@ instances: # tags: # - : # - : + + ## Start SQL trace for agent queries + ## Requires the execute privilege on `dbms_monitor` granted to the datadog user + # + # agent_sql_trace: + + ## @param enabled - boolean - optional - default: false + ## Enable SQL trace + # + # enabled: false + + ## @param binds - boolean - optional - default: false + ## include bind variables in trace + # + # binds: false + + ## @param waits - boolean - optional - default: false + ## include wait events in trace + # + # waits: false + + ## @param traced_runs - int - optional - default: 10 + ## Limit the number of traced check executions to avoid filling the file system. + # + # traced_runs: 10 diff --git a/cmd/agent/dist/conf.d/sbom.d/conf.yaml.default b/cmd/agent/dist/conf.d/sbom.d/conf.yaml.default new file mode 100644 index 0000000000000..faba9dbb04b91 --- /dev/null +++ b/cmd/agent/dist/conf.d/sbom.d/conf.yaml.default @@ -0,0 +1,5 @@ +ad_identifiers: + - _sbom +init_config: +instances: + - diff --git a/cmd/agent/install_mac_os.sh b/cmd/agent/install_mac_os.sh index 3a946e81f7aff..6ebf1964b1a7d 100755 --- a/cmd/agent/install_mac_os.sh +++ b/cmd/agent/install_mac_os.sh @@ -5,7 +5,7 @@ # Datadog Agent install script for macOS.
set -e -install_script_version=1.1.0 +install_script_version=1.2.0 dmg_file=/tmp/datadog-agent.dmg dmg_base_url="https://s3.amazonaws.com/dd-agent" etc_dir=/opt/datadog-agent/etc @@ -40,6 +40,11 @@ if [ -n "$DD_SITE" ]; then site=$DD_SITE fi +agent_dist_channel= +if [ -n "$DD_AGENT_DIST_CHANNEL" ]; then + agent_dist_channel="$DD_AGENT_DIST_CHANNEL" +fi + if [ -n "$DD_AGENT_MINOR_VERSION" ]; then # Examples: # - 20 = defaults to highest patch version x.20.2 @@ -172,7 +177,12 @@ if [ -z "$dmg_version" ]; then dmg_version="${agent_major_version}.${agent_minor_version}-1" fi fi -dmg_url="$dmg_base_url/datadog-agent-${dmg_version}.dmg" + +if [ -z "$agent_dist_channel" ]; then + dmg_url="$dmg_base_url/datadog-agent-${dmg_version}.dmg" +else + dmg_url="$dmg_base_url/$agent_dist_channel/datadog-agent-${dmg_version}.dmg" +fi if [ "$upgrade" ]; then if [ ! -f $etc_dir/datadog.conf ]; then diff --git a/cmd/agent/subcommands/flare/command.go b/cmd/agent/subcommands/flare/command.go index 3bbf6419a6396..d8a9972552008 100644 --- a/cmd/agent/subcommands/flare/command.go +++ b/cmd/agent/subcommands/flare/command.go @@ -84,6 +84,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { commonpath.PyChecksPath, commonpath.DefaultLogFile, commonpath.DefaultJmxLogFile, + commonpath.DefaultDogstatsDLogFile, )), flare.Module, core.Bundle, diff --git a/cmd/agent/subcommands/integrations/command.go b/cmd/agent/subcommands/integrations/command.go index cb65f26e5b8f0..89be8dcc1dd3b 100644 --- a/cmd/agent/subcommands/integrations/command.go +++ b/cmd/agent/subcommands/integrations/command.go @@ -78,13 +78,14 @@ type cliParams struct { // args are the positional command-line arguments args []string - allowRoot bool - verbose int - useSysPython bool - versionOnly bool - localWheel bool - thirdParty bool - pythonMajorVersion string + allowRoot bool + verbose int + useSysPython bool + versionOnly bool + localWheel bool + thirdParty bool + pythonMajorVersion string + 
unsafeDisableVerification bool } // Commands returns a slice of subcommands for the 'agent' command. @@ -133,6 +134,10 @@ You must specify a version of the package to install using the syntax: installCmd.Flags().BoolVarP( &cliParams.thirdParty, "third-party", "t", false, "install a community or vendor-contributed integration", ) + installCmd.Flags().BoolVar( + &cliParams.unsafeDisableVerification, "unsafe-disable-verification", false, "Disable trust and safety checks (only in case of unanticipated issues and when advised by customer support)", + ) + integrationCmd.AddCommand(installCmd) removeCmd := &cobra.Command{ @@ -517,6 +522,10 @@ func downloadWheel(cliParams *cliParams, integration, version, rootLayoutType st args = append(args, fmt.Sprintf("-%s", strings.Repeat("v", cliParams.verbose))) } + if cliParams.unsafeDisableVerification { + args = append(args, "--unsafe-disable-verification") + } + downloaderCmd := exec.Command(pyPath, args...) // We do all of the following so that when we call our downloader, which will diff --git a/cmd/agent/subcommands/integrations/command_test.go b/cmd/agent/subcommands/integrations/command_test.go index 9fa42b4129ccd..ebb07a5be4491 100644 --- a/cmd/agent/subcommands/integrations/command_test.go +++ b/cmd/agent/subcommands/integrations/command_test.go @@ -30,6 +30,17 @@ func TestInstallCommand(t *testing.T) { }) } +func TestInstallSkipVerificationCommand(t *testing.T) { + fxutil.TestOneShotSubcommand(t, + Commands(&command.GlobalParams{}), + []string{"integration", "install", "foo==1.0", "--unsafe-disable-verification"}, + install, + func(cliParams *cliParams, coreParams core.BundleParams) { + require.Equal(t, []string{"foo==1.0"}, cliParams.args) + require.Equal(t, true, cliParams.unsafeDisableVerification) + }) +} + func TestRemoveCommand(t *testing.T) { fxutil.TestOneShotSubcommand(t, Commands(&command.GlobalParams{}), diff --git a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index 
230d6705725ab..f23430d859e28 100644 --- a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -253,6 +253,7 @@ func getSharedFxOption() fx.Option { path.PyChecksPath, path.DefaultLogFile, path.DefaultJmxLogFile, + path.DefaultDogstatsDLogFile, )), flare.Module, core.Bundle, @@ -336,9 +337,7 @@ func startAgent( // Setup expvar server telemetryHandler := telemetry.Handler() expvarPort := pkgconfig.Datadog.GetString("expvar_port") - if pkgconfig.Datadog.GetBool("telemetry.enabled") { - http.Handle("/telemetry", telemetryHandler) - } + http.Handle("/telemetry", telemetryHandler) go func() { common.ExpvarServer = &http.Server{ Addr: fmt.Sprintf("127.0.0.1:%s", expvarPort), @@ -434,6 +433,8 @@ func startAgent( opts := aggregator.DefaultAgentDemultiplexerOptions() opts.EnableNoAggregationPipeline = pkgconfig.Datadog.GetBool("dogstatsd_no_aggregation_pipeline") + opts.UseDogstatsdContextLimiter = true + opts.DogstatsdMaxMetricsTags = pkgconfig.Datadog.GetInt("dogstatsd_max_metrics_tags") demux = aggregator.InitAndStartAgentDemultiplexer(sharedForwarder, opts, hostnameDetected) // Setup stats telemetry handler diff --git a/cmd/cluster-agent/api/v1/kubernetes_metadata.go b/cmd/cluster-agent/api/v1/kubernetes_metadata.go index e3371bcff4c5a..00a612af80120 100644 --- a/cmd/cluster-agent/api/v1/kubernetes_metadata.go +++ b/cmd/cluster-agent/api/v1/kubernetes_metadata.go @@ -21,6 +21,7 @@ import ( as "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" apicommon "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/workloadmeta" ) func installKubernetesMetadataEndpoints(r *mux.Router) { @@ -36,7 +37,7 @@ func installKubernetesMetadataEndpoints(r *mux.Router) { func installCloudFoundryMetadataEndpoints(r *mux.Router) {} // getNodeMetadata is only used when the node agent hits the DCA for the list of labels -func getNodeMetadata(w 
http.ResponseWriter, r *http.Request, f func(*as.APIClient, string) (map[string]string, error), what string, filterList []string) { +func getNodeMetadata(w http.ResponseWriter, r *http.Request, f func(*workloadmeta.KubernetesNode) map[string]string, what string, filterList []string) { /* Input localhost:5001/api/v1/tags/node/localhost @@ -54,25 +55,19 @@ func getNodeMetadata(w http.ResponseWriter, r *http.Request, f func(*as.APIClien Example: "no cached metadata found for the node localhost" */ - // As HTTP query handler, we do not retry getting the APIServer - // Client will have to retry query in case of failure - cl, err := as.GetAPIClient() - if err != nil { - log.Errorf("Can't create client to query the API Server: %v", err) //nolint:errcheck - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - vars := mux.Vars(r) var dataBytes []byte nodeName := vars["nodeName"] - nodeData, err := f(cl, nodeName) + + nodeEntity, err := workloadmeta.GetGlobalStore().GetKubernetesNode(nodeName) if err != nil { log.Errorf("Could not retrieve the node %s of %s: %v", what, nodeName, err.Error()) //nolint:errcheck http.Error(w, err.Error(), http.StatusInternalServerError) return } + nodeData := f(nodeEntity) + // Filter data to avoid returning too big useless data if filterList != nil { newNodeData := make(map[string]string) @@ -100,11 +95,11 @@ func getNodeMetadata(w http.ResponseWriter, r *http.Request, f func(*as.APIClien } func getNodeLabels(w http.ResponseWriter, r *http.Request) { - getNodeMetadata(w, r, as.GetNodeLabels, "labels", nil) + getNodeMetadata(w, r, func(e *workloadmeta.KubernetesNode) map[string]string { return e.Labels }, "labels", nil) } func getNodeAnnotations(w http.ResponseWriter, r *http.Request) { - getNodeMetadata(w, r, as.GetNodeAnnotations, "annotations", config.Datadog.GetStringSlice("kubernetes_node_annotations_as_host_aliases")) + getNodeMetadata(w, r, func(e *workloadmeta.KubernetesNode) map[string]string { return e.Annotations 
}, "annotations", config.Datadog.GetStringSlice("kubernetes_node_annotations_as_host_aliases")) } // getNamespaceLabels is only used when the node agent hits the DCA for the list of labels diff --git a/cmd/cluster-agent/subcommands/start/command.go b/cmd/cluster-agent/subcommands/start/command.go index 947d5bbb535b5..2e5a237c235b7 100644 --- a/cmd/cluster-agent/subcommands/start/command.go +++ b/cmd/cluster-agent/subcommands/start/command.go @@ -325,7 +325,7 @@ func start(log log.Component, config config.Component, forwarder defaultforwarde pkglog.Errorf("Could not start admission controller: %v", err) } else { // Webhook and secret controllers are started successfully - // Setup the the k8s admission webhook server + // Setup the k8s admission webhook server server := admissioncmd.NewServer() server.Register(pkgconfig.Datadog.GetString("admission_controller.inject_config.endpoint"), mutate.InjectConfig, apiCl.DynamicCl) server.Register(pkgconfig.Datadog.GetString("admission_controller.inject_tags.endpoint"), mutate.InjectTags, apiCl.DynamicCl) diff --git a/cmd/security-agent/flags/flags_common.go b/cmd/security-agent/flags/flags_common.go index b21327bdaa0c1..07e697385af8a 100644 --- a/cmd/security-agent/flags/flags_common.go +++ b/cmd/security-agent/flags/flags_common.go @@ -45,6 +45,12 @@ const ( Input = "input" Remote = "remote" + // Security Profile Subcommand + SecurityProfileInput = "input" + IncludeCache = "include-cache" + ImageName = "name" + ImageTag = "tag" + // Compliance Subcommand SourceType = "source-type" SourceName = "source-name" diff --git a/cmd/security-agent/subcommands/check/command.go b/cmd/security-agent/subcommands/check/command.go index 206cc7945fd39..1b69513f8dd9c 100644 --- a/cmd/security-agent/subcommands/check/command.go +++ b/cmd/security-agent/subcommands/check/command.go @@ -27,7 +27,9 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log" 
"github.com/DataDog/datadog-agent/pkg/compliance" + "github.com/DataDog/datadog-agent/pkg/compliance/k8sconfig" pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/security/common" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -103,7 +105,7 @@ func RunCheck(log log.Component, config config.Component, checkArgs *CliParams) return err } - var statsdClient ddgostatsd.ClientInterface + var statsdClient *ddgostatsd.Client metricsEnabled := config.GetBool("compliance_config.metrics.enabled") if metricsEnabled { // Create a statsd Client @@ -123,6 +125,13 @@ func RunCheck(log log.Component, config config.Component, checkArgs *CliParams) } } + if len(checkArgs.args) == 1 && checkArgs.args[0] == "k8sconfig" { + _, resourceData := k8sconfig.LoadConfiguration(context.Background(), os.Getenv("HOST_ROOT")) + b, _ := json.MarshalIndent(resourceData, "", " ") + fmt.Println(string(b)) + return nil + } + var resolver compliance.Resolver if checkArgs.overrideRegoInput != "" { resolver = newFakeResolver(checkArgs.overrideRegoInput) @@ -181,7 +190,7 @@ func RunCheck(log log.Component, config config.Component, checkArgs *CliParams) var ruleEvents []*compliance.CheckEvent switch { case rule.IsXCCDF(): - ruleEvents = compliance.EvaluateXCCDFRule(context.Background(), hname, benchmark, rule) + ruleEvents = compliance.EvaluateXCCDFRule(context.Background(), hname, statsdClient, benchmark, rule) case rule.IsRego(): ruleEvents = compliance.ResolveAndEvaluateRegoRule(context.Background(), resolver, benchmark, rule) } @@ -229,7 +238,7 @@ func reportComplianceEvents(log log.Component, config config.Component, events [ stopper := startstop.NewSerialStopper() defer stopper.Stop() runPath := config.GetString("compliance_config.run_path") - endpoints, context, err := command.NewLogContextCompliance(log) + endpoints, context, err := 
common.NewLogContextCompliance() if err != nil { return fmt.Errorf("reporter: could not reate log context for compliance: %w", err) } @@ -238,11 +247,7 @@ func reportComplianceEvents(log log.Component, config config.Component, events [ return fmt.Errorf("reporter: could not create: %w", err) } for _, event := range events { - buf, err := json.Marshal(event) - if err != nil { - return fmt.Errorf("reporter: could not marshal event: %w", err) - } - reporter.ReportRaw(buf, "") + reporter.ReportEvent(event) } return nil } diff --git a/cmd/security-agent/subcommands/compliance/command.go b/cmd/security-agent/subcommands/compliance/command.go index eeec092e906f0..987eb4202f2c1 100644 --- a/cmd/security-agent/subcommands/compliance/command.go +++ b/cmd/security-agent/subcommands/compliance/command.go @@ -6,7 +6,6 @@ package compliance import ( - "encoding/json" "fmt" "strings" @@ -20,6 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log" "github.com/DataDog/datadog-agent/pkg/compliance" + "github.com/DataDog/datadog-agent/pkg/security/common" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/startstop" ) @@ -81,7 +81,7 @@ func eventRun(log log.Component, config config.Component, eventArgs *cliParams) stopper := startstop.NewSerialStopper() defer stopper.Stop() - endpoints, dstContext, err := command.NewLogContextCompliance(log) + endpoints, dstContext, err := common.NewLogContextCompliance() if err != nil { return err } @@ -101,11 +101,6 @@ func eventRun(log log.Component, config config.Component, eventArgs *cliParams) eventData[kv[0]] = kv[1] } eventArgs.event.Data = eventData - - buf, err := json.Marshal(eventData) - if err != nil { - return err - } - reporter.ReportRaw(buf, "") + reporter.ReportEvent(eventData) return nil } diff --git a/cmd/security-agent/subcommands/compliance/compliance.go b/cmd/security-agent/subcommands/compliance/compliance.go index 
47e7d65dcf04d..41e6583e0d184 100644 --- a/cmd/security-agent/subcommands/compliance/compliance.go +++ b/cmd/security-agent/subcommands/compliance/compliance.go @@ -10,11 +10,11 @@ import ( "os" "time" - "github.com/DataDog/datadog-agent/cmd/security-agent/command" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log" "github.com/DataDog/datadog-agent/pkg/collector/runner" "github.com/DataDog/datadog-agent/pkg/compliance" + "github.com/DataDog/datadog-agent/pkg/security/common" "github.com/DataDog/datadog-agent/pkg/util/startstop" "github.com/DataDog/datadog-agent/pkg/version" ddgostatsd "github.com/DataDog/datadog-go/v5/statsd" @@ -31,7 +31,7 @@ func StartCompliance(log log.Component, config config.Component, hostname string return nil, nil } - endpoints, context, err := command.NewLogContextCompliance(log) + endpoints, context, err := common.NewLogContextCompliance() if err != nil { log.Error(err) } @@ -41,23 +41,25 @@ func StartCompliance(log log.Component, config config.Component, hostname string if err != nil { return nil, err } - if !metricsEnabled { - statsdClient = nil + + resolverOptions := compliance.ResolverOptions{ + Hostname: hostname, + HostRoot: os.Getenv("HOST_ROOT"), + DockerProvider: compliance.DefaultDockerProvider, + LinuxAuditProvider: compliance.DefaultLinuxAuditProvider, + } + + if metricsEnabled { + resolverOptions.StatsdClient = statsdClient } runner := runner.NewRunner() stopper.Add(runner) agent := compliance.NewAgent(compliance.AgentOptions{ - ResolverOptions: compliance.ResolverOptions{ - Hostname: hostname, - HostRoot: os.Getenv("HOST_ROOT"), - DockerProvider: compliance.DefaultDockerProvider, - LinuxAuditProvider: compliance.DefaultLinuxAuditProvider, - StatsdClient: statsdClient, - }, - ConfigDir: configDir, - Reporter: reporter, - CheckInterval: checkInterval, + ResolverOptions: resolverOptions, + ConfigDir: configDir, + Reporter: reporter, + CheckInterval: checkInterval, }) err = 
agent.Start() if err != nil { diff --git a/cmd/security-agent/subcommands/runtime/activity_dump.go b/cmd/security-agent/subcommands/runtime/activity_dump.go index c05d07027aa9f..201bcb0d49159 100644 --- a/cmd/security-agent/subcommands/runtime/activity_dump.go +++ b/cmd/security-agent/subcommands/runtime/activity_dump.go @@ -32,7 +32,7 @@ type activityDumpCliParams struct { containerID string comm string file string - timeout int + timeout string differentiateArgs bool localStorageDirectory string localStorageFormats []string @@ -156,11 +156,11 @@ func generateDumpCommands(globalParams *command.GlobalParams) []*cobra.Command { "", "a container identifier can be used to filter the activity dump from a specific container.", ) - activityDumpGenerateDumpCmd.Flags().IntVar( + activityDumpGenerateDumpCmd.Flags().StringVar( &cliParams.timeout, flags.Timeout, - 60, - "timeout for the activity dump in minutes", + "1m", + "timeout for the activity dump", ) activityDumpGenerateDumpCmd.Flags().BoolVar( &cliParams.differentiateArgs, @@ -283,7 +283,7 @@ func generateActivityDump(log log.Component, config config.Component, activityDu output, err := client.GenerateActivityDump(&api.ActivityDumpParams{ Comm: activityDumpArgs.comm, ContainerID: activityDumpArgs.containerID, - Timeout: int32(activityDumpArgs.timeout), + Timeout: activityDumpArgs.timeout, DifferentiateArgs: activityDumpArgs.differentiateArgs, Storage: storage, }) diff --git a/cmd/security-agent/subcommands/runtime/command.go b/cmd/security-agent/subcommands/runtime/command.go index b74d8ddd4d349..4c9fee239cee7 100644 --- a/cmd/security-agent/subcommands/runtime/command.go +++ b/cmd/security-agent/subcommands/runtime/command.go @@ -27,24 +27,17 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log" pkgconfig "github.com/DataDog/datadog-agent/pkg/config" - "github.com/DataDog/datadog-agent/pkg/logs/auditor" - "github.com/DataDog/datadog-agent/pkg/logs/client" - 
logsconfig "github.com/DataDog/datadog-agent/pkg/logs/config" - "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" - "github.com/DataDog/datadog-agent/pkg/logs/message" - "github.com/DataDog/datadog-agent/pkg/logs/pipeline" - "github.com/DataDog/datadog-agent/pkg/logs/sources" secagent "github.com/DataDog/datadog-agent/pkg/security/agent" - seccommon "github.com/DataDog/datadog-agent/pkg/security/common" + "github.com/DataDog/datadog-agent/pkg/security/common" pconfig "github.com/DataDog/datadog-agent/pkg/security/probe/config" "github.com/DataDog/datadog-agent/pkg/security/probe/kfilters" "github.com/DataDog/datadog-agent/pkg/security/proto/api" + "github.com/DataDog/datadog-agent/pkg/security/reporter" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/seclog" "github.com/DataDog/datadog-agent/pkg/security/utils" - "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/fxutil" httputils "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/util/startstop" @@ -659,46 +652,6 @@ func reloadRuntimePolicies(log log.Component, config config.Component) error { return nil } -type reporter struct { - logSource *sources.LogSource - logChan chan *message.Message -} - -func (r *reporter) ReportRaw(content []byte, service string, tags ...string) { - origin := message.NewOrigin(r.logSource) - origin.SetTags(tags) - origin.SetService(service) - msg := message.NewMessage(content, origin, message.StatusInfo, time.Now().UnixNano()) - r.logChan <- msg -} - -func newRuntimeReporter(log log.Component, config config.Component, stopper startstop.Stopper, sourceName, sourceType string, endpoints *logsconfig.Endpoints, context *client.DestinationsContext) (seccommon.RawReporter, error) { - health := 
health.RegisterLiveness("runtime-security") - - // setup the auditor - auditor := auditor.New(config.GetString("runtime_security_config.run_path"), "runtime-security-registry.json", pkgconfig.DefaultAuditorTTL, health) - auditor.Start() - stopper.Add(auditor) - - // setup the pipeline provider that provides pairs of processor and sender - pipelineProvider := pipeline.NewProvider(logsconfig.NumberOfPipelines, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, context) - pipelineProvider.Start() - stopper.Add(pipelineProvider) - - logSource := sources.NewLogSource( - sourceName, - &logsconfig.LogsConfig{ - Type: sourceType, - Source: sourceName, - }, - ) - logChan := pipelineProvider.NextPipelineChan() - return &reporter{ - logSource: logSource, - logChan: logChan, - }, nil -} - func StartRuntimeSecurity(log log.Component, config config.Component, hostname string, stopper startstop.Stopper, statsdClient *ddgostatsd.Client) (*secagent.RuntimeSecurityAgent, error) { enabled := config.GetBool("runtime_security_config.enabled") if !enabled { @@ -716,13 +669,14 @@ func StartRuntimeSecurity(log log.Component, config config.Component, hostname s } stopper.Add(agent) - endpoints, ctx, err := command.NewLogContextRuntime(log) + endpoints, ctx, err := common.NewLogContextRuntime() if err != nil { _ = log.Error(err) } stopper.Add(ctx) - reporter, err := newRuntimeReporter(log, config, stopper, "runtime-security-agent", "runtime-security", endpoints, ctx) + runPath := config.GetString("runtime_security_config.run_path") + reporter, err := reporter.NewCWSReporter(runPath, stopper, endpoints, ctx) if err != nil { return nil, err } diff --git a/cmd/security-agent/subcommands/runtime/security_profile.go b/cmd/security-agent/subcommands/runtime/security_profile.go index b118e351b9ed8..fac8f1d6b845a 100644 --- a/cmd/security-agent/subcommands/runtime/security_profile.go +++ b/cmd/security-agent/subcommands/runtime/security_profile.go @@ -19,10 +19,21 @@ import ( 
"github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log" + secagent "github.com/DataDog/datadog-agent/pkg/security/agent" + "github.com/DataDog/datadog-agent/pkg/security/proto/api" "github.com/DataDog/datadog-agent/pkg/security/security_profile/profile" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) +type securityProfileCliParams struct { + *command.GlobalParams + + includeCache bool + file string + imageName string + imageTag string +} + func securityProfileCommands(globalParams *command.GlobalParams) []*cobra.Command { securityProfileCmd := &cobra.Command{ Use: "security-profile", @@ -30,12 +41,14 @@ func securityProfileCommands(globalParams *command.GlobalParams) []*cobra.Comman } securityProfileCmd.AddCommand(securityProfileShowCommands(globalParams)...) + securityProfileCmd.AddCommand(listSecurityProfileCommands(globalParams)...) + securityProfileCmd.AddCommand(saveSecurityProfileCommands(globalParams)...) 
return []*cobra.Command{securityProfileCmd} } func securityProfileShowCommands(globalParams *command.GlobalParams) []*cobra.Command { - cliParams := &activityDumpCliParams{ + cliParams := &securityProfileCliParams{ GlobalParams: globalParams, } @@ -55,7 +68,7 @@ func securityProfileShowCommands(globalParams *command.GlobalParams) []*cobra.Co securityProfileShowCmd.Flags().StringVar( &cliParams.file, - flags.Input, + flags.SecurityProfileInput, "", "path to the activity dump file", ) @@ -63,8 +76,8 @@ func securityProfileShowCommands(globalParams *command.GlobalParams) []*cobra.Co return []*cobra.Command{securityProfileShowCmd} } -func showSecurityProfile(log log.Component, config config.Component, activityDumpArgs *activityDumpCliParams) error { - prof, err := profile.LoadProfileFromFile(activityDumpArgs.file) +func showSecurityProfile(log log.Component, config config.Component, args *securityProfileCliParams) error { + prof, err := profile.LoadProfileFromFile(args.file) if err != nil { return err } @@ -78,3 +91,159 @@ func showSecurityProfile(log log.Component, config config.Component, activityDum return nil } + +func listSecurityProfileCommands(globalParams *command.GlobalParams) []*cobra.Command { + cliParams := &securityProfileCliParams{ + GlobalParams: globalParams, + } + + securityProfileListCmd := &cobra.Command{ + Use: "list", + Short: "get the list of active security profiles", + RunE: func(cmd *cobra.Command, args []string) error { + return fxutil.OneShot(listSecurityProfiles, + fx.Supply(cliParams), + fx.Supply(core.BundleParams{ + ConfigParams: config.NewSecurityAgentParams(globalParams.ConfigFilePaths), + LogParams: log.LogForOneShot(command.LoggerName, "info", true)}), + core.Bundle, + ) + }, + } + + securityProfileListCmd.Flags().BoolVar( + &cliParams.includeCache, + flags.IncludeCache, + false, + "defines if the profiles in the Security Profile manager LRU cache should be returned", + ) + + return []*cobra.Command{securityProfileListCmd} +} + +func 
listSecurityProfiles(log log.Component, config config.Component, args *securityProfileCliParams) error { + client, err := secagent.NewRuntimeSecurityClient() + if err != nil { + return fmt.Errorf("unable to create a runtime security client instance: %w", err) + } + defer client.Close() + + output, err := client.ListSecurityProfiles(args.includeCache) + if err != nil { + return fmt.Errorf("unable send request to system-probe: %w", err) + } + if len(output.Error) > 0 { + return fmt.Errorf("security profile list request failed: %s", output.Error) + } + + if len(output.Profiles) > 0 { + fmt.Println("security profiles:") + for _, d := range output.Profiles { + printSecurityProfileMessage(d) + } + } else { + fmt.Println("no security profile found") + } + + return nil +} + +func printSecurityProfileMessage(msg *api.SecurityProfileMessage) { + prefix := " " + fmt.Printf("%s- name: %s\n", prefix, msg.GetMetadata().GetName()) + fmt.Printf("%s workload_selector:\n", prefix) + fmt.Printf("%s image_name: %v\n", prefix, msg.GetSelector().GetName()) + fmt.Printf("%s image_tag: %v\n", prefix, msg.GetSelector().GetTag()) + fmt.Printf("%s version: %v\n", prefix, msg.GetVersion()) + fmt.Printf("%s status: %v\n", prefix, msg.GetStatus()) + fmt.Printf("%s kernel_space:\n", prefix) + fmt.Printf("%s loaded: %v\n", prefix, msg.GetLoadedInKernel()) + if msg.GetLoadedInKernel() { + fmt.Printf("%s loaded_at: %v\n", prefix, msg.GetLoadedInKernelTimestamp()) + fmt.Printf("%s cookie: %v - 0x%x\n", prefix, msg.GetProfileCookie(), msg.GetProfileCookie()) + } + fmt.Printf("%s anomaly_detection_events: %v\n", prefix, msg.GetAnomalyDetectionEvents()) + if len(msg.GetLastAnomalies()) > 0 { + fmt.Printf("%s last_anomalies:\n", prefix) + for _, ano := range msg.GetLastAnomalies() { + fmt.Printf("%s - event_type: %s\n", prefix, ano.GetEventType()) + fmt.Printf("%s timestamp: %s\n", prefix, ano.GetTimestamp()) + fmt.Printf("%s is_stable: %v\n", prefix, ano.GetIsStableEventType()) + } + } + if 
len(msg.GetInstances()) > 0 { + fmt.Printf("%s instances:\n", prefix) + for _, inst := range msg.GetInstances() { + fmt.Printf("%s - container_id: %s\n", prefix, inst.GetContainerID()) + fmt.Printf("%s tags: %v\n", prefix, inst.GetTags()) + } + } + fmt.Printf("%s activity_tree_stats:\n", prefix) + fmt.Printf("%s approximate_size: %v\n", prefix, msg.GetStats().GetApproximateSize()) + fmt.Printf("%s process_nodes_count: %v\n", prefix, msg.GetStats().GetProcessNodesCount()) + fmt.Printf("%s file_nodes_count: %v\n", prefix, msg.GetStats().GetFileNodesCount()) + fmt.Printf("%s dns_nodes_count: %v\n", prefix, msg.GetStats().GetDNSNodesCount()) + fmt.Printf("%s socket_nodes_count: %v\n", prefix, msg.GetStats().GetSocketNodesCount()) + fmt.Printf("%s tags: %v\n", prefix, msg.GetTags()) +} + +func saveSecurityProfileCommands(globalParams *command.GlobalParams) []*cobra.Command { + cliParams := &securityProfileCliParams{ + GlobalParams: globalParams, + } + + securityProfileSaveCmd := &cobra.Command{ + Use: "save", + Short: "saves the requested security profile to disk", + RunE: func(cmd *cobra.Command, args []string) error { + return fxutil.OneShot(saveSecurityProfile, + fx.Supply(cliParams), + fx.Supply(core.BundleParams{ + ConfigParams: config.NewSecurityAgentParams(globalParams.ConfigFilePaths), + LogParams: log.LogForOneShot(command.LoggerName, "info", true)}), + core.Bundle, + ) + }, + } + + securityProfileSaveCmd.Flags().StringVar( + &cliParams.imageName, + flags.ImageName, + "", + "image name of the workload selector used to lookup the profile", + ) + _ = securityProfileSaveCmd.MarkFlagRequired(flags.ImageName) + securityProfileSaveCmd.Flags().StringVar( + &cliParams.imageTag, + flags.ImageTag, + "", + "image tag of the workload selector used to lookup the profile", + ) + _ = securityProfileSaveCmd.MarkFlagRequired(flags.ImageTag) + + return []*cobra.Command{securityProfileSaveCmd} +} + +func saveSecurityProfile(log log.Component, config config.Component, args 
*securityProfileCliParams) error { + client, err := secagent.NewRuntimeSecurityClient() + if err != nil { + return fmt.Errorf("unable to create a runtime security client instance: %w", err) + } + defer client.Close() + + output, err := client.SaveSecurityProfile(args.imageName, args.imageTag) + if err != nil { + return fmt.Errorf("unable send request to system-probe: %w", err) + } + if len(output.GetError()) > 0 { + return fmt.Errorf("security profile save request failed: %s", output.Error) + } + + if len(output.GetFile()) > 0 { + fmt.Printf("security profile successfully saved at: %v\n", output.GetFile()) + } else { + fmt.Println("security profile not found") + } + + return nil +} diff --git a/cmd/serverless-init/initcontainer/initcontainer.go b/cmd/serverless-init/initcontainer/initcontainer.go index b259d10e72352..bd03d948cc80e 100644 --- a/cmd/serverless-init/initcontainer/initcontainer.go +++ b/cmd/serverless-init/initcontainer/initcontainer.go @@ -8,7 +8,6 @@ package initcontainer import ( - "bytes" "context" "fmt" "os" @@ -46,13 +45,11 @@ func execute(cloudService cloudservice.CloudService, config *serverlessLog.Confi commandName, commandArgs := buildCommandParam(args) cmd := exec.Command(commandName, commandArgs...) 
cmd.Stdout = &serverlessLog.CustomWriter{ - LogConfig: config, - LineBuffer: bytes.Buffer{}, + LogConfig: config, } cmd.Stderr = &serverlessLog.CustomWriter{ - LogConfig: config, - LineBuffer: bytes.Buffer{}, - IsError: true, + LogConfig: config, + IsError: true, } err := cmd.Start() if err != nil { diff --git a/cmd/serverless-init/log/log.go b/cmd/serverless-init/log/log.go index 64c2663e6252c..1a5717b041702 100644 --- a/cmd/serverless-init/log/log.go +++ b/cmd/serverless-init/log/log.go @@ -6,8 +6,6 @@ package log import ( - "bufio" - "bytes" "fmt" "os" "strings" @@ -40,9 +38,8 @@ type Config struct { // CustomWriter wraps the log config to allow stdout/stderr redirection type CustomWriter struct { - LogConfig *Config - LineBuffer bytes.Buffer - IsError bool + LogConfig *Config + IsError bool } // CreateConfig builds and returns a log config @@ -96,17 +93,7 @@ func SetupLog(conf *Config, tags map[string]string) { func (cw *CustomWriter) Write(p []byte) (n int, err error) { fmt.Print(string(p)) - cw.LineBuffer.Write(p) - scanner := bufio.NewScanner(&cw.LineBuffer) - for scanner.Scan() { - logLine := scanner.Bytes() - // Don't write anything if we don't actually have a message. - // This can happen in the case of consecutive newlines. 
- if len(logLine) == 0 { - continue - } - Write(cw.LogConfig, logLine, cw.IsError) - } + Write(cw.LogConfig, p, cw.IsError) return len(p), nil } diff --git a/cmd/serverless-init/log/log_test.go b/cmd/serverless-init/log/log_test.go index 681bef043bcae..4b80300416c3f 100644 --- a/cmd/serverless-init/log/log_test.go +++ b/cmd/serverless-init/log/log_test.go @@ -6,7 +6,6 @@ package log import ( - "bytes" "testing" "time" @@ -14,66 +13,27 @@ import ( "github.com/stretchr/testify/assert" ) -func TestCustomWriterBuffered(t *testing.T) { +func TestCustomWriterUnbuffered(t *testing.T) { + // Custom writer should pass-through buffering behaviour of target process testContent := []byte("log line\nlog line\n") config := &Config{ channel: make(chan *config.ChannelMessage, 2), isEnabled: true, } cw := &CustomWriter{ - LogConfig: config, - LineBuffer: bytes.Buffer{}, + LogConfig: config, } go cw.Write(testContent) numMessages := 0 select { case message := <-config.channel: - assert.Equal(t, []byte("log line"), message.Content) + assert.Equal(t, []byte("log line\nlog line\n"), message.Content) numMessages++ case <-time.After(100 * time.Millisecond): t.FailNow() } - select { - case message := <-config.channel: - assert.Equal(t, []byte("log line"), message.Content) - numMessages++ - case <-time.After(100 * time.Millisecond): - t.FailNow() - } - - assert.Equal(t, 2, numMessages) -} - -func TestCustomWriterBufferedConsecutiveNewlines(t *testing.T) { - testContent := []byte("\nlog line\n\n\n\nlog line2\n\n\n") - config := &Config{ - channel: make(chan *config.ChannelMessage, 2), - isEnabled: true, - } - cw := &CustomWriter{ - LogConfig: config, - LineBuffer: bytes.Buffer{}, - } - go cw.Write(testContent) - numMessages := 0 - select { - case message := <-config.channel: - assert.Equal(t, []byte("log line"), message.Content) - numMessages++ - case <-time.After(100 * time.Millisecond): - t.FailNow() - } - - select { - case message := <-config.channel: - assert.Equal(t, []byte("log 
line2"), message.Content) - numMessages++ - case <-time.After(100 * time.Millisecond): - t.FailNow() - } - - assert.Equal(t, 2, numMessages) + assert.Equal(t, 1, numMessages) } func TestWriteEnabled(t *testing.T) { diff --git a/cmd/serverless-init/main.go b/cmd/serverless-init/main.go index 5aff18c4d43d8..b11b6681a6d6a 100644 --- a/cmd/serverless-init/main.go +++ b/cmd/serverless-init/main.go @@ -8,7 +8,6 @@ package main import ( - "github.com/DataDog/datadog-agent/pkg/serverless/tags" "os" "time" @@ -21,6 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/serverless/metrics" "github.com/DataDog/datadog-agent/pkg/serverless/otlp" "github.com/DataDog/datadog-agent/pkg/serverless/random" + "github.com/DataDog/datadog-agent/pkg/serverless/tags" "github.com/DataDog/datadog-agent/pkg/serverless/trace" logger "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -81,7 +81,7 @@ func setupMetricAgent(tags map[string]string) *metrics.ServerlessMetricAgent { config.Datadog.Set("use_v2_api.series", false) metricAgent := &metrics.ServerlessMetricAgent{} // we don't want to add the container_id tag to metrics for cardinality reasons - delete(tags, "container_id") + tags = tag.WithoutContainerID(tags) tagArray := tag.GetBaseTagsArrayWithMetadataTags(tags) metricAgent.Start(5*time.Second, &metrics.MetricConfig{}, &metrics.MetricDogStatsD{}) metricAgent.SetExtraTags(tagArray) diff --git a/cmd/serverless-init/main_test.go b/cmd/serverless-init/main_test.go index 22717344dd57f..1f73102f80def 100644 --- a/cmd/serverless-init/main_test.go +++ b/cmd/serverless-init/main_test.go @@ -8,14 +8,23 @@ package main import ( + "strings" + "testing" + "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/serverless/logs" "github.com/spf13/cast" "github.com/stretchr/testify/assert" - "testing" ) +func setupTest() { + config.Datadog = config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + config.InitConfig(config.Datadog) +} + func 
TestProxyNotLoaded(t *testing.T) { + setupTest() + proxyHttp := "abc:1234" proxyHttps := "abc:5678" t.Setenv("DD_PROXY_HTTP", proxyHttp) @@ -27,6 +36,8 @@ func TestProxyNotLoaded(t *testing.T) { } func TestProxyLoaded(t *testing.T) { + setupTest() + proxyHttp := "abc:1234" proxyHttps := "abc:5678" t.Setenv("DD_PROXY_HTTP", proxyHttp) @@ -39,6 +50,8 @@ func TestProxyLoaded(t *testing.T) { } func TestTagsSetup(t *testing.T) { + setupTest() + ddTagsEnv := "key1:value1 key2:value2 key3:value3:4" ddExtraTagsEnv := "key22:value22 key23:value23" t.Setenv("DD_TAGS", ddTagsEnv) @@ -48,7 +61,9 @@ func TestTagsSetup(t *testing.T) { allTags := append(ddTags, ddExtraTags...) - _, _, _, metricAgent := setup() + _, _, traceAgent, metricAgent := setup() + defer traceAgent.Stop() + defer metricAgent.Stop() assert.Subset(t, metricAgent.GetExtraTags(), allTags) assert.Subset(t, logs.GetLogsTags(), allTags) } diff --git a/cmd/serverless-init/tag/tag.go b/cmd/serverless-init/tag/tag.go index 661e366c321e0..5b99c292dac7d 100644 --- a/cmd/serverless-init/tag/tag.go +++ b/cmd/serverless-init/tag/tag.go @@ -63,3 +63,14 @@ func GetBaseTagsArrayWithMetadataTags(metadata map[string]string) []string { tagsMap := GetBaseTagsMapWithMetadata(metadata) return tags.MapToArray(tagsMap) } + +// WithoutContainerID creates a new tag map without the `container_id` tag +func WithoutContainerID(tags map[string]string) map[string]string { + newTags := make(map[string]string, len(tags)) + for k, v := range tags { + if k != "container_id" { + newTags[k] = v + } + } + return newTags +} diff --git a/cmd/system-probe/common/common.go b/cmd/system-probe/common/common.go index 1ff845c8523c3..42ba3bd41906b 100644 --- a/cmd/system-probe/common/common.go +++ b/cmd/system-probe/common/common.go @@ -8,9 +8,14 @@ package common import ( "context" "net/http" + + "github.com/DataDog/datadog-agent/cmd/system-probe/utils" ) var ( + // MemoryMonitor is the global system-probe memory monitor + MemoryMonitor 
*utils.MemoryMonitor + // ExpvarServer is the global expvar server ExpvarServer *http.Server diff --git a/cmd/system-probe/config/adjust.go b/cmd/system-probe/config/adjust.go new file mode 100644 index 0000000000000..467e25714f1e1 --- /dev/null +++ b/cmd/system-probe/config/adjust.go @@ -0,0 +1,153 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package config + +import ( + "fmt" + "sync" + + "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +var adjustMtx sync.Mutex + +// Adjust makes changes to the raw config based on deprecations and inferences. +func Adjust(cfg config.Config) { + if IsAdjusted(cfg) { + return + } + adjustMtx.Lock() + defer adjustMtx.Unlock() + + deprecateString(cfg, spNS("log_level"), "log_level") + deprecateString(cfg, spNS("log_file"), "log_file") + + usmEnabled := cfg.GetBool(smNS("enabled")) + dsmEnabled := cfg.GetBool(dsmNS("enabled")) + // this check must come first, so we can accurately tell if system_probe was explicitly enabled + if cfg.GetBool(spNS("enabled")) && + !cfg.IsSet(netNS("enabled")) && + !usmEnabled && + !dsmEnabled { + // This case exists to preserve backwards compatibility. If system_probe_config.enabled is explicitly set to true, and there is no network_config block, + // enable the connections/network check. 
+ log.Warn(deprecationMessage(spNS("enabled"), netNS("enabled"))) + // ensure others can key off of this single config value for NPM status + cfg.Set(netNS("enabled"), true) + } + + validateString(cfg, spNS("sysprobe_socket"), defaultSystemProbeAddress, ValidateSocketAddress) + + adjustNetwork(cfg) + adjustUSM(cfg) + adjustSecurity(cfg) + + cfg.Set(spNS("adjusted"), true) +} + +// IsAdjusted returns whether the configuration has already been adjusted by Adjust +func IsAdjusted(cfg config.Config) bool { + adjustMtx.Lock() + defer adjustMtx.Unlock() + return cfg.GetBool(spNS("adjusted")) +} + +// validateString validates the string configuration value at `key` using a custom provided function `valFn`. +// If `key` is not set or `valFn` returns an error, the `defaultVal` is used instead. +func validateString(cfg config.Config, key string, defaultVal string, valFn func(string) error) { + if cfg.IsSet(key) { + if err := valFn(cfg.GetString(key)); err != nil { + log.Errorf("error validating `%s`: %s, using default value of `%s`", key, err, defaultVal) + cfg.Set(key, defaultVal) + } + } else { + cfg.Set(key, defaultVal) + } +} + +// validateInt validates the int configuration value at `key` using a custom provided function `valFn`. +// If `key` is not set or `valFn` returns an error, the `defaultVal` is used instead. +func validateInt(cfg config.Config, key string, defaultVal int, valFn func(int) error) { + if cfg.IsSet(key) { + if err := valFn(cfg.GetInt(key)); err != nil { + log.Errorf("error validating `%s`: %s, using default value of `%d`", key, err, defaultVal) + cfg.Set(key, defaultVal) + } + } else { + cfg.Set(key, defaultVal) + } +} + +// validateInt64 validates the int64 configuration value at `key` using a custom provided function `valFn`. +// If `key` is not set or `valFn` returns an error, the `defaultVal` is used instead. 
+func validateInt64(cfg config.Config, key string, defaultVal int64, valFn func(int64) error) { + if cfg.IsSet(key) { + if err := valFn(cfg.GetInt64(key)); err != nil { + log.Errorf("error validating `%s`: %s. using default value of `%d`", key, err, defaultVal) + cfg.Set(key, defaultVal) + } + } else { + cfg.Set(key, defaultVal) + } +} + +// applyDefault sets configuration `key` to `defaultVal` only if not previously set. +func applyDefault(cfg config.Config, key string, defaultVal interface{}) { + if !cfg.IsSet(key) { + cfg.Set(key, defaultVal) + } +} + +// deprecateBool logs a deprecation message if `oldkey` is used. +// It sets `newkey` to the boolean value of `oldkey`, but only if `oldkey` is set and `newkey` is not set. +func deprecateBool(cfg config.Config, oldkey string, newkey string) { + deprecateCustom(cfg, oldkey, newkey, func(cfg config.Config) interface{} { + return cfg.GetBool(oldkey) + }) +} + +// deprecateString logs a deprecation message if `oldkey` is used. +// It sets `newkey` to the string value of `oldkey`, but only if `oldkey` is set and `newkey` is not set. +func deprecateString(cfg config.Config, oldkey string, newkey string) { + deprecateCustom(cfg, oldkey, newkey, func(cfg config.Config) interface{} { + return cfg.GetString(oldkey) + }) +} + +// deprecateCustom logs a deprecation message if `oldkey` is used. +// It sets `newkey` to the value obtained from `getFn`, but only if `oldkey` is set and `newkey` is not set. 
+func deprecateCustom(cfg config.Config, oldkey string, newkey string, getFn func(config.Config) interface{}) { + if cfg.IsSet(oldkey) { + log.Warn(deprecationMessage(oldkey, newkey)) + if !cfg.IsSet(newkey) { + cfg.Set(newkey, getFn(cfg)) + } + } +} + +// deprecationMessage returns the standard deprecation message +func deprecationMessage(oldkey, newkey string) string { + return fmt.Sprintf("configuration key `%s` is deprecated, use `%s` instead", oldkey, newkey) +} + +// limitMaxInt logs a warning and sets `key` to `max` if the value exceeds `max`. +func limitMaxInt(cfg config.Config, key string, max int) { + val := cfg.GetInt(key) + if val > max { + log.Warnf("configuration key `%s` was set to `%d`, using maximum value `%d` instead", key, val, max) + cfg.Set(key, max) + } +} + +// limitMaxInt64 logs a warning and sets `key` to `max` if the value exceeds `max`. +func limitMaxInt64(cfg config.Config, key string, max int64) { + val := cfg.GetInt64(key) + if val > max { + log.Warnf("configuration key `%s` was set to `%d`, using maximum value `%d` instead", key, val, max) + cfg.Set(key, max) + } +} diff --git a/cmd/system-probe/config/adjust_npm.go b/cmd/system-probe/config/adjust_npm.go new file mode 100644 index 0000000000000..63640dbd9f469 --- /dev/null +++ b/cmd/system-probe/config/adjust_npm.go @@ -0,0 +1,79 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package config + +import ( + "fmt" + "math" + "runtime" + + "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/util/kernel" +) + +const ( + maxConnsMessageBatchSize = 1000 + maxOffsetThreshold = 3000 + defaultMaxProcessesTracked = 1024 + defaultMaxTrackedConnections = 65536 +) + +func adjustNetwork(cfg config.Config) { + limitMaxInt(cfg, spNS("max_conns_per_message"), maxConnsMessageBatchSize) + + if cfg.GetBool(spNS("disable_tcp")) { + cfg.Set(netNS("collect_tcp_v4"), false) + cfg.Set(netNS("collect_tcp_v6"), false) + } + if cfg.GetBool(spNS("disable_udp")) { + cfg.Set(netNS("collect_udp_v4"), false) + cfg.Set(netNS("collect_udp_v6"), false) + } + if cfg.GetBool(spNS("disable_ipv6")) || !kernel.IsIPv6Enabled() { + cfg.Set(netNS("collect_tcp_v6"), false) + cfg.Set(netNS("collect_udp_v6"), false) + } + + if runtime.GOOS == "windows" { + validateInt(cfg, spNS("closed_connection_flush_threshold"), 0, func(v int) error { + if v != 0 && v < 1024 { + return fmt.Errorf("closed connection notification threshold set to invalid value %d. resetting to default", v) + } + return nil + }) + } + + validateInt64(cfg, spNS("max_tracked_connections"), defaultMaxTrackedConnections, func(v int64) error { + if v <= 0 { + return fmt.Errorf("must be a positive value") + } + return nil + }) + limitMaxInt64(cfg, spNS("max_tracked_connections"), math.MaxUint32) + // make sure max_closed_connections_buffered is equal to max_tracked_connections, + // if the former is not set. 
this helps with lowering or eliminating dropped + // closed connections in environments with mostly short-lived connections + validateInt64(cfg, spNS("max_closed_connections_buffered"), cfg.GetInt64(spNS("max_tracked_connections")), func(v int64) error { + if v <= 0 { + return fmt.Errorf("must be a positive value") + } + return nil + }) + limitMaxInt64(cfg, spNS("max_closed_connections_buffered"), math.MaxUint32) + + limitMaxInt(cfg, spNS("offset_guess_threshold"), maxOffsetThreshold) + + if !cfg.GetBool(netNS("enable_root_netns")) { + cfg.Set(spNS("enable_conntrack_all_namespaces"), false) + } + + validateInt(cfg, evNS("network_process", "max_processes_tracked"), defaultMaxProcessesTracked, func(v int) error { + if v <= 0 { + return fmt.Errorf("`%d` is 0 or less", v) + } + return nil + }) +} diff --git a/cmd/system-probe/config/adjust_security.go b/cmd/system-probe/config/adjust_security.go new file mode 100644 index 0000000000000..4f8b28674b5db --- /dev/null +++ b/cmd/system-probe/config/adjust_security.go @@ -0,0 +1,31 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package config + +import ( + "time" + + "github.com/DataDog/datadog-agent/pkg/config" +) + +func adjustSecurity(cfg config.Config) { + deprecateCustom(cfg, secNS("activity_dump.cgroup_dump_timeout"), secNS("activity_dump.dump_duration"), func(cfg config.Config) interface{} { + // convert old minutes int value to time.Duration + return time.Duration(cfg.GetInt(secNS("activity_dump.cgroup_dump_timeout"))) * time.Minute + }) + + if cfg.GetBool(secNS("enabled")) { + // if runtime is enabled then we force fim + cfg.Set(secNS("fim_enabled"), true) + } else { + // if runtime is disabled then we force disable activity dumps and security profiles + cfg.Set(secNS("activity_dump.enabled"), false) + cfg.Set(secNS("security_profile.enabled"), false) + } + + // further adjustments done in RuntimeSecurityConfig.sanitize + // because it requires access to security packages +} diff --git a/cmd/system-probe/config/adjust_usm.go b/cmd/system-probe/config/adjust_usm.go new file mode 100644 index 0000000000000..15e87f3c106c7 --- /dev/null +++ b/cmd/system-probe/config/adjust_usm.go @@ -0,0 +1,51 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package config + +import ( + "fmt" + + "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +const ( + maxHTTPFrag = 160 +) + +func adjustUSM(cfg config.Config) { + deprecateBool(cfg, netNS("enable_http_monitoring"), smNS("enable_http_monitoring")) + + if cfg.GetBool(dsmNS("enabled")) { + // DSM infers USM + cfg.Set(smNS("enabled"), true) + } + + if cfg.GetBool(smNS("enabled")) { + // USM infers HTTP + cfg.Set(smNS("enable_http_monitoring"), true) + applyDefault(cfg, netNS("enable_https_monitoring"), true) + applyDefault(cfg, spNS("enable_runtime_compiler"), true) + applyDefault(cfg, spNS("enable_kernel_header_download"), true) + } + + if cfg.GetBool(smNS("process_service_inference", "enabled")) && + !cfg.GetBool(smNS("enabled")) && + !cfg.GetBool(dsmNS("enabled")) { + log.Info("universal service monitoring and data streams monitoring are disabled, disabling process service inference") + cfg.Set(smNS("process_service_inference", "enabled"), false) + } + + validateInt(cfg, netNS("http_notification_threshold"), cfg.GetInt(netNS("max_tracked_http_connections"))/2, func(v int) error { + limit := cfg.GetInt(netNS("max_tracked_http_connections")) + if v >= limit { + return fmt.Errorf("notification threshold %d set higher than tracked connections %d", v, limit) + } + return nil + }) + + limitMaxInt64(cfg, netNS("http_max_request_fragment"), maxHTTPFrag) +} diff --git a/cmd/system-probe/config/config.go b/cmd/system-probe/config/config.go index 7393ca1ba0e00..ce944a39dec9f 100644 --- a/cmd/system-probe/config/config.go +++ b/cmd/system-probe/config/config.go @@ -16,7 +16,6 @@ import ( "github.com/DataDog/viper" aconfig "github.com/DataDog/datadog-agent/pkg/config" - "github.com/DataDog/datadog-agent/pkg/util/log" ) // ModuleName is a typed alias for string, used only for module names @@ -25,13 +24,6 @@ type ModuleName string const ( // Namespace is the top-level configuration key that all system-probe settings are nested 
underneath Namespace = "system_probe_config" - spNS = Namespace - smNS = "service_monitoring_config" - dsmNS = "data_streams_config" - diNS = "dynamic_instrumentation" - - defaultConnsMessageBatchSize = 600 - maxConnsMessageBatchSize = 1000 ) // system-probe module names @@ -45,10 +37,6 @@ const ( DynamicInstrumentationModule ModuleName = "dynamic_instrumentation" ) -func key(pieces ...string) string { - return strings.Join(pieces, ".") -} - // Config represents the configuration options for the system-probe type Config struct { Enabled bool @@ -81,6 +69,11 @@ func NewCustom(configPath string, loadSecrets bool) (*Config, error) { } func newSysprobeConfig(configPath string, loadSecrets bool) (*Config, error) { + // System probe is not supported on darwin, so we should fail gracefully in this case. + if runtime.GOOS == "darwin" { + return &Config{}, nil + } + aconfig.SystemProbe.SetConfigName("system-probe") // set the paths where a config file is expected if len(configPath) != 0 { @@ -98,15 +91,6 @@ func newSysprobeConfig(configPath string, loadSecrets bool) (*Config, error) { // load the configuration _, err := aconfig.LoadCustom(aconfig.SystemProbe, "system-probe", loadSecrets, aconfig.Datadog.GetEnvVars()) if err != nil { - // System probe is not supported on darwin, so we should fail gracefully in this case. 
- if runtime.GOOS != "darwin" { - if errors.Is(err, os.ErrPermission) { - log.Warnf("Error loading config: %v (check config file permissions for dd-agent user)", err) - } else { - log.Warnf("Error loading config: %v", err) - } - } - var e viper.ConfigFileNotFoundError if errors.As(err, &e) || errors.Is(err, os.ErrNotExist) { // do nothing, we can ignore a missing system-probe.yaml config file @@ -126,100 +110,55 @@ func newSysprobeConfig(configPath string, loadSecrets bool) (*Config, error) { func load() (*Config, error) { cfg := aconfig.SystemProbe + Adjust(cfg) c := &Config{ - Enabled: cfg.GetBool(key(spNS, "enabled")), + Enabled: cfg.GetBool(spNS("enabled")), EnabledModules: make(map[ModuleName]struct{}), - ExternalSystemProbe: cfg.GetBool(key(spNS, "external")), + ExternalSystemProbe: cfg.GetBool(spNS("external")), - SocketAddress: cfg.GetString(key(spNS, "sysprobe_socket")), - MaxConnsPerMessage: cfg.GetInt(key(spNS, "max_conns_per_message")), + SocketAddress: cfg.GetString(spNS("sysprobe_socket")), + MaxConnsPerMessage: cfg.GetInt(spNS("max_conns_per_message")), LogFile: cfg.GetString("log_file"), LogLevel: cfg.GetString("log_level"), - DebugPort: cfg.GetInt(key(spNS, "debug_port")), - TelemetryEnabled: cfg.GetBool(key(spNS, "telemetry_enabled")), + DebugPort: cfg.GetInt(spNS("debug_port")), + TelemetryEnabled: cfg.GetBool(spNS("telemetry_enabled")), StatsdHost: aconfig.GetBindHost(), StatsdPort: cfg.GetInt("dogstatsd_port"), } - // backwards compatible log settings - if !cfg.IsSet("log_level") && cfg.IsSet(key(spNS, "log_level")) { - c.LogLevel = cfg.GetString(key(spNS, "log_level")) - cfg.Set("log_level", c.LogLevel) - } - if !cfg.IsSet("log_file") && cfg.IsSet(key(spNS, "log_file")) { - c.LogFile = cfg.GetString(key(spNS, "log_file")) - cfg.Set("log_file", c.LogFile) - } - - if c.MaxConnsPerMessage > maxConnsMessageBatchSize { - log.Warn("Overriding the configured connections count per message limit because it exceeds maximum") - c.MaxConnsPerMessage = 
defaultConnsMessageBatchSize - cfg.Set(key(spNS, "max_conns_per_message"), c.MaxConnsPerMessage) - } - - // this check must come first, so we can accurately tell if system_probe was explicitly enabled - npmEnabled := cfg.GetBool("network_config.enabled") - usmEnabled := cfg.GetBool(key(smNS, "enabled")) - dsmEnabled := cfg.GetBool(key(dsmNS, "enabled")) - - if c.Enabled && !cfg.IsSet("network_config.enabled") && !usmEnabled && !dsmEnabled { - // This case exists to preserve backwards compatibility. If system_probe_config.enabled is explicitly set to true, and there is no network_config block, - // enable the connections/network check. - log.Info("`system_probe_config.enabled` is deprecated, enable NPM with `network_config.enabled` instead") - // ensure others can key off of this single config value for NPM status - cfg.Set("network_config.enabled", true) - npmEnabled = true - } + npmEnabled := cfg.GetBool(netNS("enabled")) + usmEnabled := cfg.GetBool(smNS("enabled")) + dsmEnabled := cfg.GetBool(dsmNS("enabled")) if npmEnabled || usmEnabled || dsmEnabled { c.EnabledModules[NetworkTracerModule] = struct{}{} } - if cfg.GetBool(key(spNS, "enable_tcp_queue_length")) { + if cfg.GetBool(spNS("enable_tcp_queue_length")) { c.EnabledModules[TCPQueueLengthTracerModule] = struct{}{} } - if cfg.GetBool(key(spNS, "enable_oom_kill")) { + if cfg.GetBool(spNS("enable_oom_kill")) { c.EnabledModules[OOMKillProbeModule] = struct{}{} } - if cfg.GetBool("runtime_security_config.enabled") || - cfg.GetBool("runtime_security_config.fim_enabled") || - cfg.GetBool("event_monitoring_config.process.enabled") || - (c.ModuleIsEnabled(NetworkTracerModule) && cfg.GetBool("event_monitoring_config.network_process.enabled")) { + if cfg.GetBool(secNS("enabled")) || + cfg.GetBool(secNS("fim_enabled")) || + cfg.GetBool(evNS("process.enabled")) || + (c.ModuleIsEnabled(NetworkTracerModule) && cfg.GetBool(evNS("network_process.enabled"))) { c.EnabledModules[EventMonitorModule] = struct{}{} } - if 
cfg.GetBool(key(spNS, "process_config.enabled")) { + if cfg.GetBool(spNS("process_config.enabled")) { c.EnabledModules[ProcessModule] = struct{}{} } - - if cfg.GetBool(key(diNS, "enabled")) { + if cfg.GetBool(diNS("enabled")) { c.EnabledModules[DynamicInstrumentationModule] = struct{}{} } - if len(c.EnabledModules) > 0 { - c.Enabled = true - if err := ValidateSocketAddress(c.SocketAddress); err != nil { - log.Errorf("Could not parse %s.sysprobe_socket: %s", spNS, err) - c.SocketAddress = defaultSystemProbeAddress - } - } else { - c.Enabled = false - c.SocketAddress = "" - } - - cfg.Set(key(spNS, "sysprobe_socket"), c.SocketAddress) - cfg.Set(key(spNS, "enabled"), c.Enabled) - - if cfg.GetBool(key(smNS, "process_service_inference", "enabled")) { - if !usmEnabled && !dsmEnabled { - log.Info("Both service monitoring and data streams monitoring are disabled, disabling process service inference") - cfg.Set(key(smNS, "process_service_inference", "enabled"), false) - } else { - log.Info("process service inference is enabled") - } - } + c.Enabled = len(c.EnabledModules) > 0 + // only allowed raw config adjustments here, otherwise use Adjust function + cfg.Set(spNS("enabled"), c.Enabled) return c, nil } @@ -230,12 +169,7 @@ func (c Config) ModuleIsEnabled(modName ModuleName) bool { return ok } -// SetupOptionalDatadogConfig loads the datadog.yaml config file but will not fail on a missing file -func SetupOptionalDatadogConfig() error { - return SetupOptionalDatadogConfigWithDir(defaultConfigDir, "") -} - -// SetupOptionalDatadogConfig loads the datadog.yaml config file from a given config directory but will not fail on a missing file +// SetupOptionalDatadogConfigWithDir loads the datadog.yaml config file from a given config directory but will not fail on a missing file func SetupOptionalDatadogConfigWithDir(configDir, configFile string) error { aconfig.Datadog.AddConfigPath(configDir) if configFile != "" { diff --git a/cmd/system-probe/config/config_linux.go 
b/cmd/system-probe/config/config_linux.go index 7ad2a851d4c3d..376a7291f4138 100644 --- a/cmd/system-probe/config/config_linux.go +++ b/cmd/system-probe/config/config_linux.go @@ -22,7 +22,7 @@ const ( // ValidateSocketAddress validates that the sysprobe socket config option is of the correct format. func ValidateSocketAddress(sockPath string) error { if !filepath.IsAbs(sockPath) { - return fmt.Errorf("socket path must be an absolute file path: %s", sockPath) + return fmt.Errorf("socket path must be an absolute file path: `%s`", sockPath) } return nil } diff --git a/cmd/system-probe/config/config_test.go b/cmd/system-probe/config/config_test.go index ad7215940a20f..4c9d7c7605970 100644 --- a/cmd/system-probe/config/config_test.go +++ b/cmd/system-probe/config/config_test.go @@ -3,6 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. +//go:build linux || windows + package config import ( @@ -51,13 +53,15 @@ func TestEventMonitor(t *testing.T) { {cws: true, fim: true, process_events: true, network_events: true, enabled: true}, } { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Logf("%+v\n", tc) t.Setenv("DD_RUNTIME_SECURITY_CONFIG_ENABLED", strconv.FormatBool(tc.cws)) t.Setenv("DD_RUNTIME_SECURITY_CONFIG_FIM_ENABLED", strconv.FormatBool(tc.fim)) t.Setenv("DD_SYSTEM_PROBE_EVENT_MONITORING_PROCESS_ENABLED", strconv.FormatBool(tc.process_events)) t.Setenv("DD_SYSTEM_PROBE_EVENT_MONITORING_NETWORK_PROCESS_ENABLED", strconv.FormatBool(tc.network_events)) + t.Setenv("DD_SYSTEM_PROBE_NETWORK_ENABLED", strconv.FormatBool(tc.network_events)) - cfg, err := New("") - t.Log(cfg) + cfg, err := New("/doesnotexist") + t.Logf("%+v\n", cfg) require.NoError(t, err) assert.Equal(t, tc.enabled, cfg.ModuleIsEnabled(EventMonitorModule)) }) diff --git a/cmd/system-probe/config/ns.go b/cmd/system-probe/config/ns.go new file mode 100644 index 0000000000000..90a138c0d8917 --- /dev/null +++ 
b/cmd/system-probe/config/ns.go @@ -0,0 +1,47 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package config + +import "strings" + +// spNS adds `system_probe_config` namespace to configuration key +func spNS(k ...string) string { + return nskey("system_probe_config", k...) +} + +// netNS adds `network_config` namespace to configuration key +func netNS(k ...string) string { + return nskey("network_config", k...) +} + +// smNS adds `service_monitoring_config` namespace to configuration key +func smNS(k ...string) string { + return nskey("service_monitoring_config", k...) +} + +// dsmNS adds `data_streams_config` namespace to configuration key +func dsmNS(k ...string) string { + return nskey("data_streams_config", k...) +} + +// diNS adds `dynamic_instrumentation` namespace to configuration key +func diNS(k ...string) string { + return nskey("dynamic_instrumentation", k...) +} + +// secNS adds `runtime_security_config` namespace to configuration key +func secNS(k ...string) string { + return nskey("runtime_security_config", k...) +} + +// evNS adds `event_monitoring_config` namespace to configuration key +func evNS(k ...string) string { + return nskey("event_monitoring_config", k...) 
+} + +func nskey(ns string, pieces ...string) string { + return strings.Join(append([]string{ns}, pieces...), ".") +} diff --git a/cmd/system-probe/subcommands/run/command.go b/cmd/system-probe/subcommands/run/command.go index 1795db9d06c83..a1a5e48b064fa 100644 --- a/cmd/system-probe/subcommands/run/command.go +++ b/cmd/system-probe/subcommands/run/command.go @@ -25,6 +25,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" "github.com/DataDog/datadog-agent/cmd/system-probe/command" "github.com/DataDog/datadog-agent/cmd/system-probe/common" + "github.com/DataDog/datadog-agent/cmd/system-probe/utils" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log" "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig" @@ -88,9 +89,6 @@ func run(log log.Component, config config.Component, sysprobeconfig sysprobeconf // prepare go runtime ddruntime.SetMaxProcs() - if err := ddruntime.SetGoMemLimit(ddconfig.IsContainerized()); err != nil { - log.Debugf("Couldn't set Go memory limit: %s", err) - } // Setup a channel to catch OS signals signalCh := make(chan os.Signal, 1) @@ -177,6 +175,18 @@ func startSystemProbe(cliParams *cliParams, log log.Component, sysprobeconfig sy log.Warnf("cannot setup core dumps: %s, core dumps might not be available after a crash", err) } + if sysprobeconfig.GetBool("system_probe_config.memory_controller.enabled") { + memoryPressureLevels := sysprobeconfig.GetStringMapString("system_probe_config.memory_controller.pressure_levels") + memoryThresholds := sysprobeconfig.GetStringMapString("system_probe_config.memory_controller.thresholds") + hierarchy := sysprobeconfig.GetString("system_probe_config.memory_controller.hierarchy") + common.MemoryMonitor, err = utils.NewMemoryMonitor(hierarchy, ddconfig.IsContainerized(), memoryPressureLevels, memoryThresholds) + if err != nil { + log.Warnf("cannot set up memory controller: %s", err) + } else { + common.MemoryMonitor.Start() + } + } 
+ if err := initRuntimeSettings(); err != nil { log.Warnf("cannot initialize the runtime settings: %s", err) } @@ -234,7 +244,9 @@ func stopSystemProbe(cliParams *cliParams) { } } profiling.Stop() - + if common.MemoryMonitor != nil { + common.MemoryMonitor.Stop() + } _ = os.Remove(cliParams.pidfilePath) // gracefully shut down any component diff --git a/cmd/system-probe/utils/memory_monitor_linux.go b/cmd/system-probe/utils/memory_monitor_linux.go new file mode 100644 index 0000000000000..f77129c31256d --- /dev/null +++ b/cmd/system-probe/utils/memory_monitor_linux.go @@ -0,0 +1,142 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux +// +build linux + +package utils + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "sort" + "strconv" + "strings" + + "github.com/alecthomas/units" + + "github.com/DataDog/datadog-agent/pkg/util/cgroups" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// MemoryMonitor monitors cgroups' memory usage +type MemoryMonitor = cgroups.MemoryController + +const maxProfileCount = 10 + +func getActionCallback(action string) (func(), string, error) { + switch action { + case "gc": + return runtime.GC, "garbage collector", nil + case "log": + return func() {}, "nothing", nil + case "profile": + return func() { + tmpDir := os.TempDir() + tmpFiles, err := os.ReadDir(tmpDir) + if err != nil { + log.Errorf("Failed to list old memory profiles: %s", err) + } else { + var oldProfiles []os.FileInfo + for _, tmpFile := range tmpFiles { + if strings.HasPrefix(tmpFile.Name(), "memcg-pprof-heap") { + tmpFileInfo, err := tmpFile.Info() + if err != nil { + log.Errorf("Failed to get file info for: %s", tmpFile.Name()) + continue + } + oldProfiles = append(oldProfiles, tmpFileInfo) + } + } + + 
sort.Slice(oldProfiles, func(i, j int) bool { + return oldProfiles[i].ModTime().After(oldProfiles[j].ModTime()) + }) + + for i := len(oldProfiles) - 1; i >= 0 && i >= maxProfileCount-1; i-- { + os.Remove(filepath.Join(tmpDir, oldProfiles[i].Name())) + oldProfiles = oldProfiles[:i] + } + } + + memProfile, err := os.CreateTemp(tmpDir, "memcg-pprof-heap") + if err != nil { + log.Errorf("Failed to generate memory profile: %s", err) + return + } + + defer func() { + if err := memProfile.Close(); err != nil { + log.Errorf("Failed to generate memory profile: %s", err) + } + }() + + if err := pprof.WriteHeapProfile(memProfile); err != nil { + log.Errorf("Failed to generate memory profile: %s", err) + return + } + + log.Infof("Wrote memory profile to %s", memProfile.Name()) + }, "heap profile", nil + default: + return nil, "", fmt.Errorf("unknown memory controller action '%s'", action) + } +} + +// NewMemoryMonitor instantiates a new memory monitor +func NewMemoryMonitor(kind string, containerized bool, pressureLevels map[string]string, thresholds map[string]string) (*MemoryMonitor, error) { + memoryMonitors := make([]cgroups.MemoryMonitor, 0, len(pressureLevels)+len(thresholds)) + + for pressureLevel, action := range pressureLevels { + actionCallback, name, err := getActionCallback(action) + if err != nil { + return nil, err + } + + log.Infof("New memory pressure monitor on level %s with action %s", pressureLevel, name) + memoryMonitors = append(memoryMonitors, cgroups.MemoryPressureMonitor(func() { + log.Infof("Memory pressure reached level '%s', triggering %s", pressureLevel, name) + actionCallback() + }, pressureLevel)) + } + + for threshold, action := range thresholds { + actionCallback, name, err := getActionCallback(action) + if err != nil { + return nil, err + } + + monitorCallback := func() { + log.Infof("Memory pressure above %s threshold, triggering %s", threshold, name) + actionCallback() + } + + var memoryMonitor cgroups.MemoryMonitor + threshold = 
strings.TrimSpace(threshold) + if strings.HasSuffix(threshold, "%") { + percentage, err := strconv.Atoi(strings.TrimSuffix(threshold, "%")) + if err != nil { + return nil, fmt.Errorf("invalid memory threshold '%s': %w", threshold, err) + } + + memoryMonitor = cgroups.MemoryPercentageThresholdMonitor(monitorCallback, uint64(percentage), false) + } else { + size, err := units.ParseBase2Bytes(strings.ToUpper(threshold)) + if err != nil { + return nil, fmt.Errorf("invalid memory threshold '%s': %w", threshold, err) + } + + memoryMonitor = cgroups.MemoryThresholdMonitor(monitorCallback, uint64(size), false) + } + + log.Infof("New memory threshold monitor on level %s with action %s", threshold, name) + memoryMonitors = append(memoryMonitors, memoryMonitor) + } + + return cgroups.NewMemoryController(kind, containerized, memoryMonitors...) +} diff --git a/cmd/system-probe/utils/memory_monitor_stub.go b/cmd/system-probe/utils/memory_monitor_stub.go new file mode 100644 index 0000000000000..db0028adbad62 --- /dev/null +++ b/cmd/system-probe/utils/memory_monitor_stub.go @@ -0,0 +1,23 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build !linux +// +build !linux + +package utils + +// MemoryMonitor monitors cgroups' memory usage +type MemoryMonitor struct{} + +// NewMemoryMonitor instantiates a new memory monitor +func NewMemoryMonitor(kind string, containerized bool, pressureLevels map[string]string, thresholds map[string]string) (*MemoryMonitor, error) { + return &MemoryMonitor{}, nil +} + +// Start monitoring memory +func (mm *MemoryMonitor) Start() {} + +// Stop monitoring memory +func (mm *MemoryMonitor) Stop() {} diff --git a/cmd/systray/command/command.go b/cmd/systray/command/command.go index ce2086ae2185b..fbc1e14519c95 100644 --- a/cmd/systray/command/command.go +++ b/cmd/systray/command/command.go @@ -92,6 +92,7 @@ func MakeCommand() *cobra.Command { path.PyChecksPath, path.DefaultLogFile, path.DefaultJmxLogFile, + path.DefaultDogstatsDLogFile, )), flare.Module, // systray diff --git a/cmd/trace-agent/config/config.go b/cmd/trace-agent/config/config.go index eb49f82bc1dfc..ecc425eb85637 100644 --- a/cmd/trace-agent/config/config.go +++ b/cmd/trace-agent/config/config.go @@ -71,7 +71,6 @@ func LoadConfigFile(path string) (*config.AgentConfig, error) { func prepareConfig(path string) (*config.AgentConfig, error) { cfg := config.New() - cfg.LogFilePath = DefaultLogFilePath cfg.DDAgentBin = defaultDDAgentBin cfg.AgentVersion = version.AgentVersion cfg.GitCommit = version.Commit @@ -79,6 +78,9 @@ func prepareConfig(path string) (*config.AgentConfig, error) { if _, err := coreconfig.Load(); err != nil { return cfg, err } + if !coreconfig.Datadog.GetBool("disable_file_logging") { + cfg.LogFilePath = DefaultLogFilePath + } orch := fargate.GetOrchestrator() // Needs to be after loading config, because it relies on feature auto-detection cfg.FargateOrchestrator = config.FargateOrchestratorName(orch) if p := coreconfig.Datadog.GetProxies(); p != nil { @@ -173,7 +175,7 @@ func applyDatadogConfig(c *config.AgentConfig) error { if coreconfig.Datadog.IsSet("apm_config.enabled") { 
c.Enabled = coreconfig.Datadog.GetBool("apm_config.enabled") } - if coreconfig.Datadog.IsSet("apm_config.log_file") { + if coreconfig.Datadog.IsSet("apm_config.log_file") && !coreconfig.Datadog.GetBool("disable_file_logging") { c.LogFilePath = coreconfig.Datadog.GetString("apm_config.log_file") } diff --git a/cmd/trace-agent/config/config_test.go b/cmd/trace-agent/config/config_test.go index 5637f357c362e..1e9c88d886314 100644 --- a/cmd/trace-agent/config/config_test.go +++ b/cmd/trace-agent/config/config_test.go @@ -492,6 +492,20 @@ func TestFullYamlConfig(t *testing.T) { assert.True(o.CreditCards.Luhn) } +func TestFileLoggingDisabled(t *testing.T) { + defer cleanConfig()() + origcfg := coreconfig.Datadog + coreconfig.Datadog = coreconfig.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + defer func() { + coreconfig.Datadog = origcfg + }() + assert := assert.New(t) + c, err := prepareConfig("./testdata/disable_file_logging.yaml") + assert.NoError(err) + assert.NoError(applyDatadogConfig(c)) + assert.Equal("", c.LogFilePath) +} + func TestUndocumentedYamlConfig(t *testing.T) { defer cleanConfig()() origcfg := coreconfig.Datadog diff --git a/cmd/trace-agent/config/testdata/disable_file_logging.yaml b/cmd/trace-agent/config/testdata/disable_file_logging.yaml new file mode 100644 index 0000000000000..8b8e17d03f453 --- /dev/null +++ b/cmd/trace-agent/config/testdata/disable_file_logging.yaml @@ -0,0 +1,6 @@ +hostname: thing +api_key: apikey_12 +bind_host: 0.0.0.0 +dogstatsd_port: 28125 +log_level: DEBUG +disable_file_logging: true diff --git a/cmd/trace-agent/test/backend.go b/cmd/trace-agent/test/backend.go index 160856241b116..fb540dfb47fa0 100644 --- a/cmd/trace-agent/test/backend.go +++ b/cmd/trace-agent/test/backend.go @@ -19,8 +19,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/trace/pb" - "github.com/gogo/protobuf/proto" "github.com/tinylib/msgp/msgp" + "google.golang.org/protobuf/proto" ) // defaultBackendAddress is the default listening address 
for the fake @@ -109,7 +109,7 @@ func (s *fakeBackend) handleStats(w http.ResponseWriter, req *http.Request) { if err := readMsgPRequest(req, &payload); err != nil { log.Println("server: error reading stats: ", err) } - s.out <- payload + s.out <- &payload } func (s *fakeBackend) handleTraces(w http.ResponseWriter, req *http.Request) { @@ -117,7 +117,7 @@ func (s *fakeBackend) handleTraces(w http.ResponseWriter, req *http.Request) { if err := readProtoRequest(req, &payload); err != nil { log.Println("server: error reading traces: ", err) } - s.out <- payload + s.out <- &payload } func readMsgPRequest(req *http.Request, msg msgp.Decodable) error { diff --git a/cmd/trace-agent/test/testsuite/cards_test.go b/cmd/trace-agent/test/testsuite/cards_test.go index 688c7979f7b94..f2332c3ea9728 100644 --- a/cmd/trace-agent/test/testsuite/cards_test.go +++ b/cmd/trace-agent/test/testsuite/cards_test.go @@ -97,7 +97,7 @@ apm_config: if err := r.PostMsgpack("/"+string(tt.version)+"/traces", payload); err != nil { t.Fatal(err) } - waitForTrace(t, &r, func(v pb.AgentPayload) { + waitForTrace(t, &r, func(v *pb.AgentPayload) { payloadsEqual(t, traces, v) assert.Equal(t, v.TracerPayloads[0].Chunks[0].Spans[0].Meta["credit_card_number"], tt.out) }) diff --git a/cmd/trace-agent/test/testsuite/chunking_test.go b/cmd/trace-agent/test/testsuite/chunking_test.go index 8c1116fc0766e..b81f6de0947c9 100644 --- a/cmd/trace-agent/test/testsuite/chunking_test.go +++ b/cmd/trace-agent/test/testsuite/chunking_test.go @@ -54,7 +54,7 @@ func TestPayloadChunking(t *testing.T) { for i := 0; i < payloadCount+1; i++ { select { case p := <-r.Out(): - if v, ok := p.(pb.AgentPayload); ok { + if v, ok := p.(*pb.AgentPayload); ok { // ok for _, tracerPayload := range v.TracerPayloads { got += len(tracerPayload.Chunks) diff --git a/cmd/trace-agent/test/testsuite/events_test.go b/cmd/trace-agent/test/testsuite/events_test.go index cd6baf9faa1ce..69b2a12ef56df 100644 --- 
a/cmd/trace-agent/test/testsuite/events_test.go +++ b/cmd/trace-agent/test/testsuite/events_test.go @@ -58,8 +58,8 @@ func TestAPMEvents(t *testing.T) { t.Fatal(err) } - waitForTrace(t, &runner, func(v pb.AgentPayload) { - if n := countEvents(&v); n != 0 { + waitForTrace(t, &runner, func(v *pb.AgentPayload) { + if n := countEvents(v); n != 0 { t.Fatalf("expected no events, got %d", n) } }) @@ -76,8 +76,8 @@ func TestAPMEvents(t *testing.T) { t.Fatal(err) } - waitForTrace(t, &runner, func(v pb.AgentPayload) { - if n := countEvents(&v); n != 1 { + waitForTrace(t, &runner, func(v *pb.AgentPayload) { + if n := countEvents(v); n != 1 { t.Fatalf("expected 1 event, got %d", n) } }) @@ -93,8 +93,8 @@ func TestAPMEvents(t *testing.T) { t.Fatal(err) } - waitForTrace(t, &runner, func(v pb.AgentPayload) { - if n := countEvents(&v); n != 5 { + waitForTrace(t, &runner, func(v *pb.AgentPayload) { + if n := countEvents(v); n != 5 { t.Fatalf("expected 5 event, got %d", n) } }) diff --git a/cmd/trace-agent/test/testsuite/hostname_test.go b/cmd/trace-agent/test/testsuite/hostname_test.go index 42bcdf0934e54..a59f79145fcec 100644 --- a/cmd/trace-agent/test/testsuite/hostname_test.go +++ b/cmd/trace-agent/test/testsuite/hostname_test.go @@ -49,7 +49,7 @@ func TestHostname(t *testing.T) { if err := r.Post(payload); err != nil { t.Fatal(err) } - waitForTrace(t, &r, func(v pb.AgentPayload) { + waitForTrace(t, &r, func(v *pb.AgentPayload) { if n := len(v.TracerPayloads); n != 1 { t.Fatalf("expected %d tracer payloads, got %d", 1, n) } @@ -81,7 +81,7 @@ func TestHostname(t *testing.T) { if err := r.Post(payload); err != nil { t.Fatal(err) } - waitForTrace(t, &r, func(v pb.AgentPayload) { + waitForTrace(t, &r, func(v *pb.AgentPayload) { if n := len(v.TracerPayloads); n != 1 { t.Fatalf("expected %d tracer payloads, got %d", 1, n) } @@ -97,18 +97,18 @@ func TestHostname(t *testing.T) { // waitForTrace waits on the out channel until it times out or receives an pb.AgentPayload. 
// If the latter happens it will call fn. -func waitForTrace(t *testing.T, runner *test.Runner, fn func(pb.AgentPayload)) { +func waitForTrace(t *testing.T, runner *test.Runner, fn func(*pb.AgentPayload)) { waitForTraceTimeout(t, runner, 3*time.Second, fn) } // waitForTraceTimeout behaves like waitForTrace but allows a customizable wait time. -func waitForTraceTimeout(t *testing.T, runner *test.Runner, wait time.Duration, fn func(pb.AgentPayload)) { +func waitForTraceTimeout(t *testing.T, runner *test.Runner, wait time.Duration, fn func(*pb.AgentPayload)) { timeout := time.After(wait) out := runner.Out() for { select { case p := <-out: - if v, ok := p.(pb.AgentPayload); ok { + if v, ok := p.(*pb.AgentPayload); ok { fn(v) return } diff --git a/cmd/trace-agent/test/testsuite/otlp_test.go b/cmd/trace-agent/test/testsuite/otlp_test.go index 65ad97c743215..d3e1e4ecbc653 100644 --- a/cmd/trace-agent/test/testsuite/otlp_test.go +++ b/cmd/trace-agent/test/testsuite/otlp_test.go @@ -79,7 +79,7 @@ apm_config: if err != nil { log.Fatal("Error calling: ", err) } - waitForTrace(t, &r, func(p pb.AgentPayload) { + waitForTrace(t, &r, func(p *pb.AgentPayload) { assert := assert.New(t) assert.Equal(p.Env, "my-env") assert.Len(p.TracerPayloads, 1) diff --git a/cmd/trace-agent/test/testsuite/proxy_test.go b/cmd/trace-agent/test/testsuite/proxy_test.go index ab0c6cf977b5f..0cb97879463d9 100644 --- a/cmd/trace-agent/test/testsuite/proxy_test.go +++ b/cmd/trace-agent/test/testsuite/proxy_test.go @@ -60,7 +60,7 @@ proxy: if err := r.Post(p); err != nil { t.Fatal(err) } - if err := r.PostMsgpack("/v0.6/stats", &testdata.ClientStatsTests[0].In); err != nil { + if err := r.PostMsgpack("/v0.6/stats", testdata.ClientStatsTests[0].In); err != nil { t.Fatal(err) } defer r.KillAgent() @@ -71,9 +71,9 @@ proxy: select { case p := <-out: switch p.(type) { - case pb.StatsPayload: + case *pb.StatsPayload: gots = true - case pb.AgentPayload: + case *pb.AgentPayload: gott = true } if gott && gots { 
diff --git a/cmd/trace-agent/test/testsuite/stats_test.go b/cmd/trace-agent/test/testsuite/stats_test.go index c79e59b0ad67d..5b938a60f9348 100644 --- a/cmd/trace-agent/test/testsuite/stats_test.go +++ b/cmd/trace-agent/test/testsuite/stats_test.go @@ -34,16 +34,16 @@ func TestClientStats(t *testing.T) { } defer r.KillAgent() - if err := r.PostMsgpack("/v0.6/stats", &tt.In); err != nil { + if err := r.PostMsgpack("/v0.6/stats", tt.In); err != nil { t.Fatal(err) } timeout := time.After(3 * time.Second) out := r.Out() - res := make([]pb.StatsPayload, 0, len(tt.Out)) + res := make([]*pb.StatsPayload, 0, len(tt.Out)) for { select { case p := <-out: - got, ok := p.(pb.StatsPayload) + got, ok := p.(*pb.StatsPayload) if !ok { continue } @@ -52,6 +52,7 @@ func TestClientStats(t *testing.T) { if len(res) < len(tt.Out) { continue } + assert.Equalf(t, len(res), len(tt.Out), "res had so many elements: %d\ntt has:%d", len(res), len(tt.Out)) assert.ElementsMatch(t, res, tt.Out) return case <-timeout: @@ -62,7 +63,7 @@ func TestClientStats(t *testing.T) { } } -func normalizeTimeFields(t *testing.T, p pb.StatsPayload) pb.StatsPayload { +func normalizeTimeFields(t *testing.T, p *pb.StatsPayload) *pb.StatsPayload { now := time.Now().UnixNano() for _, s := range p.Stats { for i := range s.Stats { diff --git a/cmd/trace-agent/test/testsuite/testdata/clientstats.go b/cmd/trace-agent/test/testsuite/testdata/clientstats.go index 83fdb7026bcc5..4f63672b94fde 100644 --- a/cmd/trace-agent/test/testsuite/testdata/clientstats.go +++ b/cmd/trace-agent/test/testsuite/testdata/clientstats.go @@ -23,11 +23,11 @@ func getEmptyDDSketch() []byte { // ClientStatsTests contains a suite of tests for testing the stats endpoint. 
var ClientStatsTests = []struct { - In pb.ClientStatsPayload - Out []pb.StatsPayload + In *pb.ClientStatsPayload + Out []*pb.StatsPayload }{ { - In: pb.ClientStatsPayload{ + In: &pb.ClientStatsPayload{ Hostname: "testhost", Env: "testing", Version: "0.1-alpha", @@ -56,7 +56,7 @@ var ClientStatsTests = []struct { }, }, }, - Out: []pb.StatsPayload{{ + Out: []*pb.StatsPayload{{ AgentHostname: "agent-hostname", AgentEnv: "agent-env", AgentVersion: "6.0.0", @@ -97,7 +97,7 @@ var ClientStatsTests = []struct { }, }, { - In: pb.ClientStatsPayload{ + In: &pb.ClientStatsPayload{ Hostname: "testhost", Env: "testing", Version: "0.1-alpha", @@ -153,7 +153,7 @@ var ClientStatsTests = []struct { }, }, }, - Out: []pb.StatsPayload{ + Out: []*pb.StatsPayload{ { AgentHostname: "agent-hostname", AgentEnv: "agent-env", diff --git a/cmd/trace-agent/test/testsuite/traces_test.go b/cmd/trace-agent/test/testsuite/traces_test.go index c7bfcda5ab6f6..f6d2f6fa70989 100644 --- a/cmd/trace-agent/test/testsuite/traces_test.go +++ b/cmd/trace-agent/test/testsuite/traces_test.go @@ -44,7 +44,7 @@ func TestTraces(t *testing.T) { if err := r.Post(p); err != nil { t.Fatal(err) } - waitForTrace(t, &r, func(v pb.AgentPayload) { + waitForTrace(t, &r, func(v *pb.AgentPayload) { if v.Env != "my-env" { t.Fatalf("Expected env my-env, got: %q", v.Env) } @@ -73,7 +73,7 @@ func TestTraces(t *testing.T) { if err := r.Post(p); err != nil { t.Fatal(err) } - waitForTrace(t, &r, func(v pb.AgentPayload) { + waitForTrace(t, &r, func(v *pb.AgentPayload) { payloadsEqual(t, p[2:], v) }) }) @@ -95,7 +95,7 @@ func TestTraces(t *testing.T) { if err := r.Post(p); err != nil { t.Fatal(err) } - waitForTrace(t, &r, func(v pb.AgentPayload) { + waitForTrace(t, &r, func(v *pb.AgentPayload) { payloadsEqual(t, append(p[:2], p[3:]...), v) }) }) @@ -134,14 +134,14 @@ func TestTraces(t *testing.T) { if err := r.Post(p); err != nil { t.Fatal(err) } - waitForTrace(t, &r, func(v pb.AgentPayload) { + waitForTrace(t, &r, func(v 
*pb.AgentPayload) { payloadsEqual(t, p[:2], v) }) }) } // payloadsEqual validates that the traces in from are the same as the ones in to. -func payloadsEqual(t *testing.T, from pb.Traces, to pb.AgentPayload) { +func payloadsEqual(t *testing.T, from pb.Traces, to *pb.AgentPayload) { got := 0 for _, tracerPayload := range to.TracerPayloads { got += len(tracerPayload.Chunks) diff --git a/comp/README.md b/comp/README.md index 3c3356f5cd7c1..df1042eb36215 100644 --- a/comp/README.md +++ b/comp/README.md @@ -81,6 +81,10 @@ Package containercheck implements a component to handle Container data collectio Package expvars initializes the expvar server of the process agent. +### [comp/process/forwarders](https://pkg.go.dev/github.com/DataDog/dd-agent-comp-experiments/comp/process/forwarders) + +Package forwarders implements a component to provide forwarders used by the process agent. + ### [comp/process/hostinfo](https://pkg.go.dev/github.com/DataDog/dd-agent-comp-experiments/comp/process/hostinfo) Package hostinfo wraps the hostinfo inside a component. This is useful because it is relied on by other components. diff --git a/comp/core/flare/params.go b/comp/core/flare/params.go index 12bc567425686..140fbf68ab907 100644 --- a/comp/core/flare/params.go +++ b/comp/core/flare/params.go @@ -22,23 +22,27 @@ type Params struct { // defaultJMXLogFile the path to the default JMX log file defaultJMXLogFile string + + // defaultDogstatsdLogFile the path to the default JMX log file + defaultDogstatsdLogFile string } // NewLocalParams returns parameters for to initialize a local flare component. Local flares are meant to be created by // the CLI process instead of the main Agent one. 
-func NewLocalParams(distPath string, pythonChecksPath string, defaultLogFile string, defaultJMXLogFile string) Params { - p := NewParams(distPath, pythonChecksPath, defaultLogFile, defaultJMXLogFile) +func NewLocalParams(distPath string, pythonChecksPath string, defaultLogFile string, defaultJMXLogFile string, defaultDogstatsdLogFile string) Params { + p := NewParams(distPath, pythonChecksPath, defaultLogFile, defaultJMXLogFile, defaultDogstatsdLogFile) p.local = true return p } // NewLocalParams returns parameters for to initialize a non local flare component -func NewParams(distPath string, pythonChecksPath string, defaultLogFile string, defaultJMXLogFile string) Params { +func NewParams(distPath string, pythonChecksPath string, defaultLogFile string, defaultJMXLogFile string, defaultDogstatsdLogFile string) Params { return Params{ - local: false, - distPath: distPath, - pythonChecksPath: pythonChecksPath, - defaultLogFile: defaultLogFile, - defaultJMXLogFile: defaultJMXLogFile, + local: false, + distPath: distPath, + pythonChecksPath: pythonChecksPath, + defaultLogFile: defaultLogFile, + defaultJMXLogFile: defaultJMXLogFile, + defaultDogstatsdLogFile: defaultDogstatsdLogFile, } } diff --git a/comp/core/flare/providers.go b/comp/core/flare/providers.go index be5eff9e06e15..538652d5298a0 100644 --- a/comp/core/flare/providers.go +++ b/comp/core/flare/providers.go @@ -33,6 +33,11 @@ func (f *flare) collectLogsFiles(fb flarehelpers.FlareBuilder) error { jmxLogFile = f.params.defaultJMXLogFile } + dogstatsdLogFile := f.config.GetString("dogstatsd_log_file") + if dogstatsdLogFile == "" { + dogstatsdLogFile = f.params.defaultDogstatsdLogFile + } + shouldIncludeFunc := func(path string) bool { if filepath.Ext(path) == ".log" || getFirstSuffix(path) == ".log" { return true @@ -43,6 +48,7 @@ func (f *flare) collectLogsFiles(fb flarehelpers.FlareBuilder) error { f.log.Flush() fb.CopyDirToWithoutScrubbing(filepath.Dir(logFile), "logs", shouldIncludeFunc) 
fb.CopyDirToWithoutScrubbing(filepath.Dir(jmxLogFile), "logs", shouldIncludeFunc) + fb.CopyDirToWithoutScrubbing(filepath.Dir(dogstatsdLogFile), "logs", shouldIncludeFunc) return nil } diff --git a/comp/dogstatsd/replay/writer.go b/comp/dogstatsd/replay/writer.go index 5e23edd73510e..8a34d8c5a3282 100644 --- a/comp/dogstatsd/replay/writer.go +++ b/comp/dogstatsd/replay/writer.go @@ -22,9 +22,9 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" - "github.com/DataDog/datadog-agent/pkg/proto/utils" "github.com/DataDog/datadog-agent/pkg/tagger" "github.com/DataDog/datadog-agent/pkg/util/log" + protoutils "github.com/DataDog/datadog-agent/pkg/util/proto" "github.com/golang/protobuf/proto" ) @@ -331,7 +331,7 @@ func (tc *TrafficCaptureWriter) writeState() (int, error) { continue } - entityID, err := utils.Tagger2PbEntityID(entity.ID) + entityID, err := protoutils.Tagger2PbEntityID(entity.ID) if err != nil { log.Warnf("unable to compute valid EntityID for %v", id) continue diff --git a/comp/dogstatsd/serverDebug/debug.go b/comp/dogstatsd/serverDebug/debug.go index e7addaf6b3979..b37c95c4d65b7 100644 --- a/comp/dogstatsd/serverDebug/debug.go +++ b/comp/dogstatsd/serverDebug/debug.go @@ -14,11 +14,15 @@ import ( "sync" "time" + commonpath "github.com/DataDog/datadog-agent/cmd/agent/common/path" + configComponent "github.com/DataDog/datadog-agent/comp/core/config" logComponent "github.com/DataDog/datadog-agent/comp/core/log" "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" + "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/benbjohnson/clock" + slog "github.com/cihub/seelog" "go.uber.org/atomic" "go.uber.org/fx" ) @@ -26,7 +30,8 @@ import ( type dependencies struct { fx.In - Log logComponent.Component + Log logComponent.Component + Config configComponent.Component } // metricStat holds how many 
times a metric has been @@ -52,21 +57,22 @@ type serverDebug struct { // we use a real clock in production code or a mock clock for unit testing clock clock.Clock tagsAccumulator *tagset.HashingTagsAccumulator + // dogstatsdDebugLogger is an instance of the logger config that can be used to create new logger for dogstatsd-stats metrics + dogstatsdDebugLogger slog.LoggerInterface } // TODO: (components) - remove once serverless is an FX app func NewServerlessServerDebug() Component { - return newServerDebugCompat(logComponent.NewTemporaryLoggerWithoutInit()) + return newServerDebugCompat(logComponent.NewTemporaryLoggerWithoutInit(), config.Datadog) } // newServerDebug creates a new instance of a ServerDebug func newServerDebug(deps dependencies) Component { - return newServerDebugCompat(deps.Log) + return newServerDebugCompat(deps.Log, deps.Config) } -func newServerDebugCompat(log logComponent.Component) Component { - - return &serverDebug{ +func newServerDebugCompat(log logComponent.Component, cfg config.ConfigReader) Component { + sd := &serverDebug{ log: log, enabled: atomic.NewBool(false), Stats: make(map[ckey.ContextKey]metricStat), @@ -78,6 +84,9 @@ func newServerDebugCompat(log logComponent.Component) Component { keyGen: ckey.NewKeyGenerator(), clock: clock.New(), } + sd.dogstatsdDebugLogger = sd.getDogstatsdDebug(cfg) + + return sd } // metricsCountBuckets is counting the amount of metrics received for the last 5 seconds. 
@@ -157,6 +166,11 @@ func (d *serverDebug) StoreMetricStats(sample metrics.MetricSample) { ms.Tags = strings.Join(d.tagsAccumulator.Get(), " ") // we don't want/need to share the underlying array d.Stats[key] = ms + if d.dogstatsdDebugLogger != nil { + logMessage := "Metric Name: %v | Tags: {%v} | Count: %v | Last Seen: %v " + d.dogstatsdDebugLogger.Infof(logMessage, ms.Name, ms.Tags, ms.Count, ms.LastSeen) + } + d.metricsCounts.metricChan <- struct{}{} } @@ -250,3 +264,28 @@ func (d *serverDebug) disableMetricsStats() { d.log.Info("Disabling DogStatsD debug metrics stats.") } + +// build a local dogstatsd logger and bubbling up any errors +func (d *serverDebug) getDogstatsdDebug(cfg config.ConfigReader) slog.LoggerInterface { + + var dogstatsdLogger slog.LoggerInterface + + // Configuring the log file path + logFile := cfg.GetString("dogstatsd_log_file") + if logFile == "" { + logFile = commonpath.DefaultDogstatsDLogFile + } + + // Set up dogstatsdLogger + if cfg.GetBool("dogstatsd_logging_enabled") { + logger, e := config.SetupDogstatsdLogger(logFile) + if e != nil { + // use component logger instead of global logger. + d.log.Errorf("Unable to set up Dogstatsd logger: %v. 
|| Please reach out to Datadog support at https://docs.datadoghq.com/help/ ", e) + return nil + } + dogstatsdLogger = logger + } + return dogstatsdLogger + +} diff --git a/comp/dogstatsd/serverDebug/debug_test.go b/comp/dogstatsd/serverDebug/debug_test.go index 4b8141f9630c4..9f867da474bba 100644 --- a/comp/dogstatsd/serverDebug/debug_test.go +++ b/comp/dogstatsd/serverDebug/debug_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/DataDog/datadog-agent/comp/core" + configComponent "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/tagset" @@ -21,16 +22,19 @@ import ( "go.uber.org/fx" ) -func fulfillDeps(t testing.TB) Component { +func fulfillDeps(t testing.TB, overrides map[string]interface{}) Component { return fxutil.Test[Component](t, fx.Options( core.MockBundle, fx.Supply(core.BundleParams{}), + fx.Replace(configComponent.MockParams{Overrides: overrides}), Module, )) } func TestDebugStatsSpike(t *testing.T) { - debug := fulfillDeps(t) + cfg := make(map[string]interface{}) + cfg["dogstatsd_logging_enabled"] = false + debug := fulfillDeps(t, cfg) d := debug.(*serverDebug) assert := assert.New(t) @@ -83,10 +87,13 @@ func TestDebugStatsSpike(t *testing.T) { // it is no more considered a spike because we had another second with 500 metrics assert.False(d.hasSpike()) + } func TestDebugStats(t *testing.T) { - debug := fulfillDeps(t) + cfg := make(map[string]interface{}) + cfg["dogstatsd_logging_enabled"] = false + debug := fulfillDeps(t, cfg) d := debug.(*serverDebug) clk := clock.NewMock() @@ -156,4 +163,5 @@ func TestDebugStats(t *testing.T) { require.Equal(t, metric4.Tags, "c b") require.Equal(t, metric5.Tags, "c b") require.Equal(t, hash4, hash5) + } diff --git a/comp/forwarder/defaultforwarder/forwarder.go b/comp/forwarder/defaultforwarder/forwarder.go index 0c54c8a3af99f..6b489ca892c6b 100644 --- 
a/comp/forwarder/defaultforwarder/forwarder.go +++ b/comp/forwarder/defaultforwarder/forwarder.go @@ -17,10 +17,14 @@ type dependencies struct { } func newForwarder(dep dependencies) Component { - if dep.Params.UseNoopForwarder { + return NewForwarder(dep.Config, dep.Params) +} + +func NewForwarder(config config.Component, params Params) Component { + if params.UseNoopForwarder { return NoopForwarder{} } - return NewDefaultForwarder(dep.Config, dep.Params.Options) + return NewDefaultForwarder(config, params.Options) } func newMockForwarder(config config.Component) Component { diff --git a/comp/process/bundle.go b/comp/process/bundle.go index 5709349db8802..ed3499682a13d 100644 --- a/comp/process/bundle.go +++ b/comp/process/bundle.go @@ -17,6 +17,7 @@ import ( "github.com/DataDog/datadog-agent/comp/process/connectionscheck" "github.com/DataDog/datadog-agent/comp/process/containercheck" "github.com/DataDog/datadog-agent/comp/process/expvars" + "github.com/DataDog/datadog-agent/comp/process/forwarders" "github.com/DataDog/datadog-agent/comp/process/hostinfo" "github.com/DataDog/datadog-agent/comp/process/podcheck" "github.com/DataDog/datadog-agent/comp/process/processcheck" @@ -49,6 +50,7 @@ var Bundle = fxutil.Bundle( hostinfo.Module, expvars.Module, apiserver.Module, + forwarders.Module, core.Bundle, ) diff --git a/comp/process/expvars/expvars.go b/comp/process/expvars/expvars.go index 8441545b78ac1..e08d3c693a363 100644 --- a/comp/process/expvars/expvars.go +++ b/comp/process/expvars/expvars.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig" "github.com/DataDog/datadog-agent/comp/process/hostinfo" ddconfig "github.com/DataDog/datadog-agent/pkg/config" - "github.com/DataDog/datadog-agent/pkg/process/runner" + "github.com/DataDog/datadog-agent/pkg/process/runner/endpoint" "github.com/DataDog/datadog-agent/pkg/process/status" "github.com/DataDog/datadog-agent/pkg/process/util" ) @@ -91,7 +91,7 @@ func initStatus(deps 
dependencies) error { // If the sysprobe module is enabled, the process check can call out to the sysprobe for privileged stats _, processModuleEnabled := deps.SysProbeConfig.Object().EnabledModules[sysconfig.ProcessModule] - eps, err := runner.GetAPIEndpoints(deps.Config) + eps, err := endpoint.GetAPIEndpoints(deps.Config) if err != nil { _ = deps.Log.Criticalf("Failed to initialize Api Endpoints: %s", err.Error()) } diff --git a/comp/process/forwarders/component.go b/comp/process/forwarders/component.go new file mode 100644 index 0000000000000..61bdcd977864d --- /dev/null +++ b/comp/process/forwarders/component.go @@ -0,0 +1,32 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package forwarders implements a component to provide forwarders used by the process agent. +package forwarders + +import ( + "go.uber.org/fx" + + "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" +) + +// team: processes + +type Component interface { + GetEventForwarder() defaultforwarder.Component + GetProcessForwarder() defaultforwarder.Component + GetRTProcessForwarder() defaultforwarder.Component + GetConnectionsForwarder() defaultforwarder.Component +} + +// Module defines the fx options for this component. +var Module = fxutil.Component( + fx.Provide(newForwarders), +) + +var MockModule = fxutil.Component( + fx.Provide(newMockForwarders), +) diff --git a/comp/process/forwarders/forwarders.go b/comp/process/forwarders/forwarders.go new file mode 100644 index 0000000000000..8b20866dfb761 --- /dev/null +++ b/comp/process/forwarders/forwarders.go @@ -0,0 +1,89 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package forwarders + +import ( + "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/comp/core/log" + "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" + ddconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/resolver" + "github.com/DataDog/datadog-agent/pkg/process/runner/endpoint" + apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" + "go.uber.org/fx" +) + +type dependencies struct { + fx.In + + Config config.Component + Logger log.Component +} + +type forwarders struct { + eventForwarder defaultforwarder.Component + processForwarder defaultforwarder.Component + rtProcessForwarder defaultforwarder.Component + connectionsForwarder defaultforwarder.Component +} + +func newForwarders(deps dependencies) (Component, error) { + config := deps.Config + queueBytes := config.GetInt("process_config.process_queue_bytes") + if queueBytes <= 0 { + deps.Logger.Warnf("Invalid queue bytes size: %d. 
Using default value: %d", queueBytes, ddconfig.DefaultProcessQueueBytes) + queueBytes = ddconfig.DefaultProcessQueueBytes + } + + eventsAPIEndpoints, err := endpoint.GetEventsAPIEndpoints(config) + if err != nil { + return nil, err + } + + eventForwarderOpts := createParams(deps.Config, queueBytes, eventsAPIEndpoints) + + processAPIEndpoints, err := endpoint.GetAPIEndpoints(config) + if err != nil { + return nil, err + } + + processForwarderOpts := createParams(deps.Config, queueBytes, processAPIEndpoints) + + return &forwarders{ + eventForwarder: defaultforwarder.NewForwarder(deps.Config, eventForwarderOpts), + processForwarder: defaultforwarder.NewForwarder(deps.Config, processForwarderOpts), + rtProcessForwarder: defaultforwarder.NewForwarder(deps.Config, processForwarderOpts), + connectionsForwarder: defaultforwarder.NewForwarder(deps.Config, processForwarderOpts), + }, nil + +} + +func createParams(config config.Component, queueBytes int, endpoints []apicfg.Endpoint) defaultforwarder.Params { + forwarderOpts := defaultforwarder.NewOptionsWithResolvers(config, resolver.NewSingleDomainResolvers(apicfg.KeysPerDomains(endpoints))) + forwarderOpts.DisableAPIKeyChecking = true + forwarderOpts.RetryQueuePayloadsTotalMaxSize = queueBytes // Allow more in-flight requests than the default + return defaultforwarder.Params{Options: forwarderOpts} +} + +func (f *forwarders) GetEventForwarder() defaultforwarder.Component { + return f.eventForwarder +} + +func (f *forwarders) GetProcessForwarder() defaultforwarder.Component { + return f.processForwarder +} + +func (f *forwarders) GetRTProcessForwarder() defaultforwarder.Component { + return f.rtProcessForwarder +} + +func (f *forwarders) GetConnectionsForwarder() defaultforwarder.Component { + return f.connectionsForwarder +} + +func newMockForwarders(deps dependencies) (Component, error) { + return newForwarders(deps) +} diff --git a/comp/process/submitter/submitter.go b/comp/process/submitter/submitter.go index 
005c88c22b927..e0b4be862be1c 100644 --- a/comp/process/submitter/submitter.go +++ b/comp/process/submitter/submitter.go @@ -14,6 +14,7 @@ import ( "go.uber.org/fx" "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/comp/process/forwarders" "github.com/DataDog/datadog-agent/comp/process/hostinfo" "github.com/DataDog/datadog-agent/comp/process/types" processRunner "github.com/DataDog/datadog-agent/pkg/process/runner" @@ -29,8 +30,9 @@ type dependencies struct { fx.In Lc fx.Lifecycle - HostInfo hostinfo.Component - Config config.Component + HostInfo hostinfo.Component + Config config.Component + Forwarders forwarders.Component } type result struct { @@ -41,7 +43,7 @@ type result struct { } func newSubmitter(deps dependencies) (result, error) { - s, err := processRunner.NewSubmitter(deps.Config, deps.HostInfo.Object().HostName) + s, err := processRunner.NewSubmitter(deps.Config, deps.Forwarders, deps.HostInfo.Object().HostName) if err != nil { return result{}, err } diff --git a/comp/process/submitter/submitter_test.go b/comp/process/submitter/submitter_test.go index 2d051997bddac..0c95f80c72dc7 100644 --- a/comp/process/submitter/submitter_test.go +++ b/comp/process/submitter/submitter_test.go @@ -11,6 +11,7 @@ import ( "go.uber.org/fx" "github.com/DataDog/datadog-agent/comp/core" + "github.com/DataDog/datadog-agent/comp/process/forwarders" "github.com/DataDog/datadog-agent/comp/process/hostinfo" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -19,6 +20,7 @@ func TestSubmitterLifecycle(t *testing.T) { _ = fxutil.Test[Component](t, fx.Options( hostinfo.MockModule, core.MockBundle, + forwarders.MockModule, Module, )) } diff --git a/docs/dev/agent_omnibus.md b/docs/dev/agent_omnibus.md index 9eb1f9088c71b..ee6647524149d 100644 --- a/docs/dev/agent_omnibus.md +++ b/docs/dev/agent_omnibus.md @@ -118,7 +118,7 @@ $opts = "-e OMNIBUS_TARGET=main -e RELEASE_VERSION=$RELEASE_VERSION -e MAJOR_VER if ($DEBUG) { $opts += " -e 
DEBUG_CUSTOMACTION=yes " } -$cmd += " -m 4096M -v ""$(Get-Location):c:\mnt"" $opts datadog/agent-buildimages-windows_x64:1809 c:\mnt\tasks\winbuildscripts\buildwin.bat" +$cmd += " -m 8192M -v ""$(Get-Location):c:\mnt"" $opts datadog/agent-buildimages-windows_x64:1809 c:\mnt\tasks\winbuildscripts\buildwin.bat" Write-Host $cmd Invoke-Expression -Command $cmd ``` diff --git a/go.mod b/go.mod index 80a99c33f83c5..59caa3946e1db 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,9 @@ replace ( ) replace ( + github.com/DataDog/datadog-agent/pkg/gohai => ./pkg/gohai github.com/DataDog/datadog-agent/pkg/obfuscate => ./pkg/obfuscate + github.com/DataDog/datadog-agent/pkg/proto => ./pkg/proto github.com/DataDog/datadog-agent/pkg/remoteconfig/state => ./pkg/remoteconfig/state github.com/DataDog/datadog-agent/pkg/security/secl => ./pkg/security/secl github.com/DataDog/datadog-agent/pkg/trace => ./pkg/trace @@ -39,28 +41,28 @@ require ( code.cloudfoundry.org/bbs v0.0.0-20200403215808-d7bc971db0db code.cloudfoundry.org/garden v0.0.0-20210208153517-580cadd489d2 code.cloudfoundry.org/lager v2.0.0+incompatible - github.com/CycloneDX/cyclonedx-go v0.7.0 - github.com/DataDog/agent-payload/v5 v5.0.81 + github.com/CycloneDX/cyclonedx-go v0.7.1 + github.com/DataDog/agent-payload/v5 v5.0.85 github.com/DataDog/appsec-internal-go v0.0.0-20230215162203-5149228be86a - github.com/DataDog/datadog-agent/pkg/obfuscate v0.45.0-rc.3 - github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.45.0-rc.3 - github.com/DataDog/datadog-agent/pkg/security/secl v0.45.0-rc.3 - github.com/DataDog/datadog-agent/pkg/trace v0.45.0-rc.4 - github.com/DataDog/datadog-agent/pkg/util/cgroups v0.45.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.45.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/pointer v0.45.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.45.0-rc.3 + github.com/DataDog/datadog-agent/pkg/gohai v0.46.0-rc.2 + github.com/DataDog/datadog-agent/pkg/obfuscate v0.46.0-rc.2 + 
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.46.0-rc.2 + github.com/DataDog/datadog-agent/pkg/security/secl v0.46.0-rc.2 + github.com/DataDog/datadog-agent/pkg/trace v0.46.0-rc.2 + github.com/DataDog/datadog-agent/pkg/util/cgroups v0.46.0-rc.2 + github.com/DataDog/datadog-agent/pkg/util/log v0.46.0-rc.2 + github.com/DataDog/datadog-agent/pkg/util/pointer v0.46.0-rc.2 + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.46.0-rc.2 github.com/DataDog/datadog-go/v5 v5.1.1 - github.com/DataDog/datadog-operator v0.7.1-0.20230215125730-2ba58ce29d56 + github.com/DataDog/datadog-operator v1.0.3 github.com/DataDog/ebpf-manager v0.2.8-0.20230331131947-0cbd4db2728c github.com/DataDog/go-libddwaf v1.0.0 github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork - github.com/DataDog/gohai v0.0.0-20221116153829-5d479901d2e9 github.com/DataDog/gopsutil v1.2.2 github.com/DataDog/nikos v1.12.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.2.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.2.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.2.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.2.3 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.2.3 + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.2.3 github.com/DataDog/sketches-go v1.4.2 github.com/DataDog/viper v1.12.0 github.com/DataDog/watermarkpodautoscaler v0.5.2 @@ -69,12 +71,11 @@ require ( github.com/Masterminds/semver v1.5.0 github.com/Masterminds/semver/v3 v3.2.1 github.com/Masterminds/sprig/v3 v3.2.3 // indirect - github.com/Microsoft/go-winio v0.6.0 - github.com/Microsoft/hcsshim v0.9.8 + github.com/Microsoft/go-winio v0.6.1 + github.com/Microsoft/hcsshim v0.9.9 github.com/acobaugh/osrelease v0.1.0 github.com/alecthomas/participle v0.7.1 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/aquasecurity/trivy v0.0.0-00010101000000-000000000000 // keep this proto version to 
not confuse dependabot github.com/aquasecurity/trivy-db v0.0.0-20230105123735-5ce110fc82e1 github.com/avast/retry-go/v4 v4.3.4 github.com/aws/aws-lambda-go v1.37.0 @@ -88,7 +89,6 @@ require ( github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 github.com/cilium/ebpf v0.10.0 github.com/clbanning/mxj v1.8.4 - github.com/cloudfoundry-community/go-cfclient v0.0.0-20210621174645-7773f7e22665 github.com/containerd/cgroups v1.0.4 github.com/containerd/containerd v1.6.20 github.com/containerd/typeurl v1.0.2 @@ -98,7 +98,7 @@ require ( github.com/cri-o/ocicni v0.4.0 github.com/cyphar/filepath-securejoin v0.2.3 github.com/davecgh/go-spew v1.1.1 - github.com/docker/docker v23.0.5+incompatible + github.com/docker/docker v24.0.2+incompatible github.com/docker/go-connections v0.4.0 github.com/dustin/go-humanize v1.0.1 github.com/elastic/go-libaudit v0.4.0 @@ -118,7 +118,7 @@ require ( github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.3 github.com/google/go-cmp v0.5.9 - github.com/google/go-containerregistry v0.12.0 + github.com/google/go-containerregistry v0.14.0 github.com/google/gofuzz v1.2.0 github.com/google/gopacket v1.1.19 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 @@ -127,13 +127,13 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/h2non/filetype v1.1.3 - github.com/hashicorp/consul/api v1.19.1 + github.com/hashicorp/consul/api v1.20.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru v0.5.4 github.com/hashicorp/golang-lru/v2 v2.0.2 github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 github.com/iceber/iouring-go v0.0.0-20220609112130-b1dc8dd9fbfd - github.com/imdario/mergo v0.3.13 + github.com/imdario/mergo v0.3.15 github.com/invopop/jsonschema v0.7.0 github.com/iovisor/gobpf v0.2.0 github.com/itchyny/gojq v0.12.12 @@ -151,36 +151,36 @@ require ( github.com/netsampler/goflow2 v1.3.3 github.com/olekukonko/tablewriter v0.0.5 
github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 - github.com/open-policy-agent/opa v0.51.0 + github.com/open-policy-agent/opa v0.53.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.75.0 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b - github.com/opencontainers/runtime-spec v1.1.0-rc.1 + github.com/opencontainers/runtime-spec v1.1.0-rc.2 github.com/openshift/api v3.9.0+incompatible github.com/pahanini/go-grpc-bidirectional-streaming-example v0.0.0-20211027164128-cc6111af44be github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.15.0 + github.com/prometheus/client_golang v1.15.1 github.com/prometheus/client_model v0.3.0 - github.com/prometheus/procfs v0.9.0 + github.com/prometheus/procfs v0.10.0 github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 github.com/robfig/cron/v3 v3.0.1 - github.com/samber/lo v1.37.0 + github.com/samber/lo v1.38.1 github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da github.com/secure-systems-lab/go-securesystemslib v0.5.0 github.com/shirou/gopsutil/v3 v3.23.2 github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 - github.com/sirupsen/logrus v1.9.0 + github.com/sirupsen/logrus v1.9.2 github.com/skydive-project/go-debouncer v1.0.0 - github.com/smira/go-xz v0.0.0-20220607140411-c2a07d4bedda + github.com/smira/go-xz v0.1.0 github.com/spf13/afero v1.9.3 github.com/spf13/cast v1.5.1 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/streadway/amqp v1.0.0 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.3 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 - github.com/tinylib/msgp v1.1.6 + github.com/tinylib/msgp v1.1.8 github.com/twmb/murmur3 v1.1.6 github.com/uptrace/bun v1.1.12 github.com/uptrace/bun/dialect/pgdialect v1.1.12 @@ -200,7 +200,7 @@ require ( 
go.opentelemetry.io/collector/exporter v0.75.0 go.opentelemetry.io/collector/exporter/loggingexporter v0.75.0 go.opentelemetry.io/collector/exporter/otlpexporter v0.75.0 - go.opentelemetry.io/collector/pdata v1.0.0-rcv0011 + go.opentelemetry.io/collector/pdata v1.0.0-rcv0012 go.opentelemetry.io/collector/processor/batchprocessor v0.75.0 go.opentelemetry.io/collector/receiver v0.75.0 go.opentelemetry.io/collector/receiver/otlpreceiver v0.75.0 @@ -220,8 +220,8 @@ require ( golang.org/x/time v0.3.0 golang.org/x/tools v0.9.1 golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 - google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 - google.golang.org/grpc v1.54.0 + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/grpc v1.55.0 google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a google.golang.org/protobuf v1.30.0 gopkg.in/DataDog/dd-trace-go.v1 v1.49.1 @@ -249,9 +249,9 @@ require ( require ( cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/compute v1.18.0 // indirect + cloud.google.com/go/compute v1.19.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.12.0 // indirect + cloud.google.com/go/iam v0.13.0 // indirect cloud.google.com/go/storage v1.30.1 // indirect code.cloudfoundry.org/cfhttp/v2 v2.0.0 // indirect code.cloudfoundry.org/clock v1.0.0 // indirect @@ -260,7 +260,6 @@ require ( code.cloudfoundry.org/executor v0.0.0-20200218194701-024d0bdd52d4 // indirect code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee // indirect code.cloudfoundry.org/go-loggregator v7.4.0+incompatible // indirect - code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f // indirect code.cloudfoundry.org/locket v0.0.0-20200131001124-67fd0a0fdf2d // indirect code.cloudfoundry.org/rep v0.0.0-20200325195957-1404b978e31e // indirect code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a // indirect @@ -301,7 +300,7 @@ require 
( github.com/aquasecurity/tml v0.6.1 // indirect github.com/arduino/go-apt-client v0.0.0-20190812130613-5613f843fdc8 // indirect github.com/armon/go-metrics v0.4.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/awalterschulze/gographviz v2.0.3+incompatible // indirect github.com/aws/aws-sdk-go-v2 v1.17.1 // indirect github.com/aws/aws-sdk-go-v2/config v1.18.3 // indirect @@ -326,7 +325,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/continuity v0.3.0 // indirect github.com/containerd/fifo v1.0.0 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.13.0 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect github.com/containerd/ttrpc v1.1.1 // indirect github.com/containernetworking/plugins v1.1.1 // indirect github.com/coreos/go-systemd/v22 v22.4.0 // indirect @@ -334,8 +333,8 @@ require ( github.com/dgryski/go-jump v0.0.0-20211018200510-ba001c3ffce0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/cli v23.0.0-rc.1+incompatible // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect + github.com/docker/cli v23.0.1+incompatible // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-units v0.5.0 // indirect @@ -358,22 +357,23 @@ require ( github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/loads v0.21.2 // indirect - github.com/go-openapi/runtime v0.24.2 // indirect - github.com/go-openapi/spec v0.20.7 // indirect - github.com/go-openapi/strfmt v0.21.3 // indirect 
+ github.com/go-openapi/runtime v0.26.0 // indirect + github.com/go-openapi/spec v0.20.9 // indirect + github.com/go-openapi/strfmt v0.21.7 // indirect github.com/go-openapi/swag v0.22.3 // indirect - github.com/go-openapi/validate v0.22.0 // indirect + github.com/go-openapi/validate v0.22.1 // indirect github.com/go-redis/redis/v8 v8.11.5 // indirect github.com/go-test/deep v1.0.7 // indirect + github.com/godbus/dbus/v5 v5.0.6 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/golang-jwt/jwt/v4 v4.4.2 // indirect - github.com/golang/glog v1.0.0 // indirect + github.com/golang/glog v1.1.0 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/licenseclassifier/v2 v2.0.0 // indirect github.com/google/uuid v1.3.0 github.com/google/wire v0.5.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.7.1 // indirect + github.com/googleapis/gax-go/v2 v2.8.0 // indirect github.com/googleapis/gnostic v0.5.5 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect @@ -391,7 +391,7 @@ require ( github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 // indirect github.com/ianlancetaylor/cgosymbolizer v0.0.0-20221208003206-eaf69f594683 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639 // indirect - github.com/in-toto/in-toto-golang v0.7.1 // indirect + github.com/in-toto/in-toto-golang v0.8.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/itchyny/timefmt-go v0.1.5 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect @@ -456,7 +456,7 @@ require ( github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 // indirect github.com/pborman/uuid v1.2.1 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/philhofer/fwd v1.1.1 // indirect + github.com/philhofer/fwd v1.1.2 // indirect 
github.com/pierrec/lz4/v4 v4.1.17 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect @@ -465,7 +465,7 @@ require ( github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect github.com/rivo/uniseg v0.4.4 // indirect - github.com/rs/cors v1.8.3 // indirect + github.com/rs/cors v1.9.0 // indirect github.com/safchain/baloum v0.0.0-20221229104256-b1fc8f70a86b github.com/saracen/walker v0.0.0-20191201085201-324a081bae7e // indirect github.com/sassoftware/go-rpmutils v0.2.0 // indirect @@ -517,7 +517,7 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector/consumer v0.75.0 // indirect go.opentelemetry.io/collector/featuregate v0.75.0 // indirect - go.opentelemetry.io/collector/semconv v0.75.0 // indirect + go.opentelemetry.io/collector/semconv v0.78.1 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.15.0 // indirect @@ -532,13 +532,13 @@ require ( go.opentelemetry.io/otel/sdk/metric v0.37.0 // indirect go.opentelemetry.io/otel/trace v1.14.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - golang.org/x/crypto v0.7.0 // indirect - golang.org/x/mod v0.10.0 // indirect - golang.org/x/oauth2 v0.6.0 // indirect + golang.org/x/crypto v0.8.0 // indirect + golang.org/x/mod v0.10.0 + golang.org/x/oauth2 v0.8.0 // indirect golang.org/x/term v0.8.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect gonum.org/v1/gonum v0.12.0 // indirect - google.golang.org/api v0.114.0 // indirect + google.golang.org/api v0.119.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/Knetic/govaluate.v3 v3.0.0 // indirect gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect @@ 
-566,14 +566,21 @@ require ( ) require ( + github.com/DataDog/datadog-agent/pkg/proto v0.46.0-rc.2 + github.com/aquasecurity/trivy v0.0.0-00010101000000-000000000000 + github.com/cloudfoundry-community/go-cfclient/v2 v2.0.1-0.20230503155151-3d15366c5820 github.com/godror/godror v0.37.0 github.com/jmoiron/sqlx v1.3.5 + github.com/kr/pretty v0.3.1 github.com/sijms/go-ora/v2 v2.7.6 ) require ( github.com/cloudflare/circl v1.1.0 // indirect github.com/godror/knownpb v0.1.0 // indirect + github.com/google/s2a-go v0.1.2 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/rs/zerolog v1.29.0 // indirect github.com/sigstore/rekor v1.0.1 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect @@ -589,7 +596,7 @@ replace github.com/pahanini/go-grpc-bidirectional-streaming-example v0.0.0-20211 replace github.com/dgrijalva/jwt-go => github.com/golang-jwt/jwt v3.2.1+incompatible // Remove once the issue https://github.com/microsoft/Windows-Containers/issues/72 is resolved -replace github.com/golang/glog v1.0.0 => github.com/DataDog/glog v1.0.1-0.20211019114809-ec0f43a655b9 +replace github.com/golang/glog v1.1.0 => github.com/DataDog/glog v1.1.2-0.20230527101146-81a67cdbc7a1 replace github.com/vishvananda/netlink => github.com/DataDog/netlink v1.0.1-0.20220504230202-f7323aba1f6c @@ -601,7 +608,7 @@ replace k8s.io/kube-state-metrics/v2 => github.com/datadog/kube-state-metrics/v2 // Use custom Trivy fork to reduce binary size // Pull in replacements needed by upstream Trivy replace ( - github.com/aquasecurity/trivy => github.com/DataDog/trivy v0.0.0-20230418154509-807f757a8339 + github.com/aquasecurity/trivy => github.com/DataDog/trivy v0.0.0-20230526171704-5aaa4575395e github.com/saracen/walker => github.com/DataDog/walker v0.0.0-20230418153152-7f29bb2dc950 github.com/spdx/tools-golang => github.com/spdx/tools-golang v0.3.0 oras.land/oras-go => oras.land/oras-go v1.1.1 @@ -624,3 +631,12 @@ replace ( 
k8s.io/metrics => k8s.io/metrics v0.23.15 sigs.k8s.io/custom-metrics-apiserver => sigs.k8s.io/custom-metrics-apiserver v1.23.0 ) + +// Fixes CVE-2023-1732, imported by nikos +replace github.com/cloudflare/circl v1.1.0 => github.com/cloudflare/circl v1.3.3 + +// Fixes CVE-2023-30551, imported by trivy +replace github.com/sigstore/rekor v1.0.1 => github.com/sigstore/rekor v1.1.1 + +// Fixes CVE-2023-26054, imported by trivy +replace github.com/moby/buildkit v0.11.0 => github.com/moby/buildkit v0.11.4 diff --git a/go.sum b/go.sum index 0732e9201af47..d4783c455a2d9 100644 --- a/go.sum +++ b/go.sum @@ -29,15 +29,15 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= -cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0 
h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= @@ -69,8 +69,6 @@ code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee h1:iAAPf9s7/+ code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee/go.mod h1:Jzi+ccHgo/V/PLQUaQ6hnZcC1c4BS790gx21LRRui4g= code.cloudfoundry.org/go-loggregator v7.4.0+incompatible h1:KqZYloMQWM5Zg/BQKunOIA4OODh7djZbk48qqbowNFI= code.cloudfoundry.org/go-loggregator v7.4.0+incompatible/go.mod h1:KPBTRqj+y738Nhf1+g4JHFaBU8j7dedirR5ETNHvMXU= -code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk= -code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= code.cloudfoundry.org/lager v2.0.0+incompatible h1:WZwDKDB2PLd/oL+USK4b4aEjUymIej9My2nUQ9oWEwQ= code.cloudfoundry.org/lager v2.0.0+incompatible/go.mod h1:O2sS7gKP3HM2iemG+EnwvyNQK7pTSC6Foi4QiMp9sSk= code.cloudfoundry.org/locket v0.0.0-20200131001124-67fd0a0fdf2d h1:4a9j7UW7yfC57sgUI0ZIJRZDl4Jzopm1LjEa/r5v36I= @@ -124,10 +122,10 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/CycloneDX/cyclonedx-go v0.7.0 h1:jNxp8hL7UpcvPDFXjY+Y1ibFtsW+e5zyF9QoSmhK/zg= -github.com/CycloneDX/cyclonedx-go v0.7.0/go.mod h1:W5Z9w8pTTL+t+yG3PCiFRGlr8PUlE0pGWzKSJbsyXkg= 
-github.com/DataDog/agent-payload/v5 v5.0.81 h1:ANnPje8r65ZZ0Ku3CTC4CDwBGRMLUa7dXxo6ik9N7cE= -github.com/DataDog/agent-payload/v5 v5.0.81/go.mod h1:oQZi1VZp1e3QvlSUX4iphZCpJaFepUxWq0hNXxihKBM= +github.com/CycloneDX/cyclonedx-go v0.7.1 h1:5w1SxjGm9MTMNTuRbEPyw21ObdbaagTWF/KfF0qHTRE= +github.com/CycloneDX/cyclonedx-go v0.7.1/go.mod h1:N/nrdWQI2SIjaACyyDs/u7+ddCkyl/zkNs8xFsHF2Ps= +github.com/DataDog/agent-payload/v5 v5.0.85 h1:kmcBpkNwbfKCP3fBENZ6uvahvheTMgwjVjONKajVbI8= +github.com/DataDog/agent-payload/v5 v5.0.85/go.mod h1:oQZi1VZp1e3QvlSUX4iphZCpJaFepUxWq0hNXxihKBM= github.com/DataDog/appsec-internal-go v0.0.0-20230215162203-5149228be86a h1:7ZiVdU4j19IYuy8rR0uUzC7I7HjWul61ZEyUgvLkZBM= github.com/DataDog/appsec-internal-go v0.0.0-20230215162203-5149228be86a/go.mod h1:ILSJBuOg3E0Jg8qgSnm7+g8DXa0KrfahnS7jhS1DoWs= github.com/DataDog/aptly v1.5.1 h1:Znm0WZ/cSjjTLe0HY3rKN1uqa7YIu+uIo3UQG/0WlIM= @@ -137,22 +135,20 @@ github.com/DataDog/cast v1.3.1-0.20190301154711-1ee8c8bd14a3/go.mod h1:Qx5cxh0v+ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go/v5 v5.1.1 h1:JLZ6s2K1pG2h9GkvEvMdEGqMDyVLEAccdX5TltWcLMU= github.com/DataDog/datadog-go/v5 v5.1.1/go.mod h1:KhiYb2Badlv9/rofz+OznKoEF5XKTonWyhx5K83AP8E= -github.com/DataDog/datadog-operator v0.7.1-0.20230215125730-2ba58ce29d56 h1:uj+2t8VpFSgnRucSTr3fG3atc1I6j6fgSw8+nCxnwOs= -github.com/DataDog/datadog-operator v0.7.1-0.20230215125730-2ba58ce29d56/go.mod h1:3XBfwfK0wBBcs7L+10JI79pg6vYdbOwY3oj+O4Omi6c= +github.com/DataDog/datadog-operator v1.0.3 h1:zBGNnmFsU99wttrt0PWXXRgJvTRGeWt3YD7wUh7VBd4= +github.com/DataDog/datadog-operator v1.0.3/go.mod h1:CXTLg7VFcpaMvjbwkKTxKUIzxRumfSe01/CWOINo4c0= github.com/DataDog/ebpf-manager v0.2.8-0.20230331131947-0cbd4db2728c h1:tX6ul0I7xhlzihcBU/ivz1cr50VzqVl8qDRtSqVB4F8= github.com/DataDog/ebpf-manager v0.2.8-0.20230331131947-0cbd4db2728c/go.mod h1:QNbbXqQdl1RDRDna/vSHzhpTNXTnqxxJRVXdeANgCUA= 
github.com/DataDog/extendeddaemonset v0.9.0-rc.2 h1:uTE/QEU0oYtHnebKSMbxap7XMG5603WQxNP/UX63E7k= github.com/DataDog/extendeddaemonset v0.9.0-rc.2/go.mod h1:JgKVGTsjdTdtJjNyxRZjcs81/rng6LJ3XX/0D7Y12Gc= -github.com/DataDog/glog v1.0.1-0.20211019114809-ec0f43a655b9 h1:lV/hyE5bbXqUJQe+6LXdElwOc0uShL6yzz2vwZ1B314= -github.com/DataDog/glog v1.0.1-0.20211019114809-ec0f43a655b9/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/DataDog/glog v1.1.2-0.20230527101146-81a67cdbc7a1 h1:YpYdpEG3ohpETQTzz9u4bTvvJUzkRFwMyLrx/jtbU5g= +github.com/DataDog/glog v1.1.2-0.20230527101146-81a67cdbc7a1/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302-b9cf785c02fe h1:RO40ywnX/vZLi4Pb4jRuFGgQQBYGIIoQ6u+P2MIgFOA= github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302-b9cf785c02fe/go.mod h1:90sqV0j7E8wYCyqIp5d9HmYWLTFQttqPFFtNYDyAybQ= github.com/DataDog/go-libddwaf v1.0.0 h1:C0cHE++wMFWf5/BDO8r/3dTDCj21U/UmPIT0PiFMvsA= github.com/DataDog/go-libddwaf v1.0.0/go.mod h1:DI5y8obPajk+Tvy2o+nZc2g/5Ria/Rfq5/624k7pHpE= github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork h1:yBq5PrAtrM4yVeSzQ+bn050+Ysp++RKF1QmtkL4VqvU= github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork/go.mod h1:yA5JwkZsHTLuqq3zaRgUQf35DfDkpOZqgtBqHKpwrBs= -github.com/DataDog/gohai v0.0.0-20221116153829-5d479901d2e9 h1:QBZ04VJZLtjKL4MbJbB/7/stiJVvobfZFIgFot0fB7M= -github.com/DataDog/gohai v0.0.0-20221116153829-5d479901d2e9/go.mod h1:2rZHLOzPdGj56gQon9IyLVR7s18lOpUGxmErHt9HWP8= github.com/DataDog/gopsutil v1.2.2 h1:8lmthwyyCXa1NKiYcHlrtl9AAFdfbNI2gPcioCJcBPU= github.com/DataDog/gopsutil v1.2.2/go.mod h1:glkxNt/qRu9lnpmUEQwOIAXW+COWDTBOTEAHqbgBPts= github.com/DataDog/gostackparse v0.5.0 h1:jb72P6GFHPHz2W0onsN51cS3FkaMDcjb0QzgxxA4gDk= @@ -165,17 +161,17 @@ github.com/DataDog/netlink v1.0.1-0.20220504230202-f7323aba1f6c h1:w4mZkX45/iUye github.com/DataDog/netlink v1.0.1-0.20220504230202-f7323aba1f6c/go.mod 
h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/DataDog/nikos v1.12.0 h1:B9seaiowtbMyH2MX3f3/EcEk1sznSFXaPBwTcBLdogU= github.com/DataDog/nikos v1.12.0/go.mod h1:vboQtY04KmE+Ua8m7gVheZJcnStQY+fIiSPY/6jJVrY= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.2.0 h1:vfItShcj4B+2aKfucy61Vh/RL+JP6SCONhGyW515cBY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.2.0 h1:WiLLKBFvMf8R53s610OlgsgsOk5m9t7imfDSj9qUDQs= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.2.0/go.mod h1:FN5Kqegof+xUhQy4W3rIQ7qHpBehbB6EnEs1BXWzJDI= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.2.0 h1:/mINULat/FNCvlS0294s+lBdFeC1VP31BYCLkYImk8g= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.2.0/go.mod h1:VHhvwnkW6aO+NnV0JRF838rEQeKshvB3v2XPwJiVxto= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.2.0 h1:Iw7eXZOMkFTWX/PXEK8qz4s/o01GrSiaPKLJPMBxD9w= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.2.0/go.mod h1:AMPzJ3k7k6NlhpmZXjjnLmZNVnZTnJsj0cpgL9KduyI= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.2.3 h1:IznT4hQ24X3gOir+AVotp8h/ogYoXAmvU5jggBqOO8o= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.2.3 h1:QM2aly2st5pdRuDhc8w026YXNIm9IjjyaXhJ4BTdTZQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.2.3/go.mod h1:q5aHWhZtr114VrPgmlJL+JnemK7O66YxDCRy9zAHAxU= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.2.3 h1:Ruz1pG5VktrppFfqUcXP2Oi0ET3z5P1KdGWCt5o1m38= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.2.3/go.mod h1:wqX2en+xnhopVIPNwxhlU/y/eV/5eNcLQsztsfoe2tg= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.2.3 h1:qE9/iNHkwuIcSpsecG0L+rXX/dxULKWGZrSH4UHO0fA= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.2.3/go.mod h1:sE+tvtb7R7a9QJiLkct3WKQWKpTznLtbDRcXlnb5SNs= github.com/DataDog/sketches-go v1.4.2 h1:gppNudE9d19cQ98RYABOetxIhpTCl4m7CnbRZjvVA/o= 
github.com/DataDog/sketches-go v1.4.2/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk= -github.com/DataDog/trivy v0.0.0-20230418154509-807f757a8339 h1:ursx1WwFb+b0arV1d4061adoX0qGKGvZbBYKWiwYza4= -github.com/DataDog/trivy v0.0.0-20230418154509-807f757a8339/go.mod h1:f1nyvEyWU3w5L8dYIQljloMO/BX0i0OK9NwGeZtvmFw= +github.com/DataDog/trivy v0.0.0-20230526171704-5aaa4575395e h1:sw8zPMtAx20gY2wSdANQ/Z64B1hOMrku+zoPFGcumLk= +github.com/DataDog/trivy v0.0.0-20230526171704-5aaa4575395e/go.mod h1:f1nyvEyWU3w5L8dYIQljloMO/BX0i0OK9NwGeZtvmFw= github.com/DataDog/viper v1.12.0 h1:FufyZpZPxyszafSV5B8Q8it75IhhuJwH0T7QpT6HnD0= github.com/DataDog/viper v1.12.0/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= github.com/DataDog/walker v0.0.0-20230418153152-7f29bb2dc950 h1:2imDajw3V85w1iqHsuXN+hUBZQVF+r9eME8tsPq/HpA= @@ -214,8 +210,8 @@ github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JP github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= -github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -224,8 +220,8 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim 
v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.9.8 h1:lf7xxK2+Ikbj9sVf2QZsouGjRjEp2STj1yDHgoVtU5k= -github.com/Microsoft/hcsshim v0.9.8/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim v0.9.9 h1:FYrTiCNOc8ZddNBVkJBxWZYm22rgxHFmxMoGK66sDF0= +github.com/Microsoft/hcsshim v0.9.9/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -312,8 +308,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= -github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/avast/retry-go/v4 v4.3.4 
h1:pHLkL7jvCvP317I8Ge+Km2Yhntv3SdkJm7uekkqbKhM= github.com/avast/retry-go/v4 v4.3.4/go.mod h1:rv+Nla6Vk3/ilU0H51VHddWHiwimzX66yZ0JT6T+UvE= github.com/awalterschulze/gographviz v0.0.0-20160912181450-761fd5fbb34e/go.mod h1:GEV5wmg4YquNw7v1kkyoX9etIk8yVmXj+AkDHuuETHs= @@ -404,7 +400,7 @@ github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= github.com/caarlos0/env/v6 v6.10.1 h1:t1mPSxNpei6M5yAeu1qtRdPAK29Nbcf/n3G7x+b3/II= github.com/caarlos0/env/v6 v6.10.1/go.mod h1:hvp/ryKXKipEkcuYjs9mI4bBCg+UI0Yhgm5Zu0ddvwc= @@ -448,10 +444,10 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY= -github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= -github.com/cloudfoundry-community/go-cfclient v0.0.0-20210621174645-7773f7e22665 h1:LDCKU3OIIsl7sX1KggC6zIHKk0PCeYbmOGbCHqNCSOQ= -github.com/cloudfoundry-community/go-cfclient v0.0.0-20210621174645-7773f7e22665/go.mod h1:0FdHblxw7g3M2PPICOw9i8YZOHP9dZTHbJUtoxL7Z/E= 
+github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudfoundry-community/go-cfclient/v2 v2.0.1-0.20230503155151-3d15366c5820 h1:ixkQUDJYG6eSxgUEl6LLE2l2TD2C5AYmlm+fVhsr6Zs= +github.com/cloudfoundry-community/go-cfclient/v2 v2.0.1-0.20230503155151-3d15366c5820/go.mod h1:hB1MLDqhbVF09FbBLrU430kDZZPAO9lVck00xhljoqU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -536,8 +532,8 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= -github.com/containerd/stargz-snapshotter/estargz v0.13.0 h1:fD7AwuVV+B40p0d9qVkH/Au1qhp8hn/HWJHIYjpEcfw= -github.com/containerd/stargz-snapshotter/estargz v0.13.0/go.mod h1:m+9VaGJGlhCnrcEUod8mYumTmRgblwd3rC5UCEh2Yp0= +github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= +github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod 
h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -627,16 +623,16 @@ github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/ github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v23.0.0-rc.1+incompatible h1:Vl3pcUK4/LFAD56Ys3BrqgAtuwpWd/IO3amuSL0ZbP0= -github.com/docker/cli v23.0.0-rc.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v23.0.1+incompatible h1:LRyWITpGzl2C9e9uGxzisptnxAn1zfZKXy13Ul2Q5oM= +github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v23.0.5+incompatible h1:DaxtlTJjFSnLOXVNUBU1+6kXGz2lpDoEAH6QoxaSg8k= -github.com/docker/docker v23.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.2+incompatible 
h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg= +github.com/docker/docker v24.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= @@ -789,18 +785,18 @@ github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXym github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= -github.com/go-openapi/runtime v0.24.2 h1:yX9HMGQbz32M87ECaAhGpJjBmErO3QLcgdZj9BzGx7c= -github.com/go-openapi/runtime v0.24.2/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= +github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= +github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.20.7 h1:1Rlu/ZrOCCob0n+JKKJAWhNWMPW8bOZRg8FJaY+0SKI= -github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= +github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= 
-github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= +github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -808,9 +804,8 @@ github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/ github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= -github.com/go-openapi/validate v0.22.0 h1:b0QecH6VslW/TxtpKgzpO1SNG7GU2FsaqKdP1E2T50Y= -github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= +github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= @@ -827,7 +822,6 @@ github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB github.com/go-sql-driver/mysql v1.7.0 
h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M= @@ -866,6 +860,7 @@ github.com/godbus/dbus v4.1.0+incompatible h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7 github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godror/godror v0.37.0 h1:3wR3/1msywDE49PzuXh9UUiwWOBNri0RVQQcu3HU4UY= github.com/godror/godror v0.37.0/go.mod h1:jW1+pN+z/V0h28p9XZXVNtEvfZP/2EBfaSjKJLp3E4g= @@ -889,6 +884,7 @@ github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -953,8 +949,8 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= -github.com/google/go-containerregistry v0.12.0 h1:nidOEtFYlgPCRqxCKj/4c/js940HVWplCWc5ftdfdUA= -github.com/google/go-containerregistry v0.12.0/go.mod h1:sdIK+oHQO7B93xI8UweYdl887YhuIwg9vz8BSLH3+8k= +github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw= +github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk= github.com/google/go-dap v0.6.0/go.mod h1:5q8aYQFnHOAZEMP+6vmq25HKYAEwE+LF5yh7JKrrhSQ= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -985,6 +981,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.2 h1:WVtYAYuYxKeYajAmThMRYWP6K3wXkcqbGHeUgeubUHY= +github.com/google/s2a-go v0.1.2/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/subcommands v1.0.1/go.mod 
h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -999,8 +997,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9 github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= -github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.8.0 h1:UBtEZqx1bjXtOQ5BVTkuYghXrr3N4V123VKJK67vJZc= +github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= @@ -1039,8 +1037,8 @@ github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= -github.com/hashicorp/consul/api v1.19.1 h1:GLeK1WD4VIRvt4wRhQKHFudztEkRb8pDs+uRiJgNwes= -github.com/hashicorp/consul/api v1.19.1/go.mod h1:jAt316eYgWGNLJtxkMQrcqRpuDE/kFJdqkEFwRXFv8U= +github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= +github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo= github.com/hashicorp/consul/sdk v0.1.1/go.mod 
h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY= @@ -1136,10 +1134,10 @@ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/in-toto/in-toto-golang v0.7.1 h1:IzOB18y7/4KoEp4/RiWnuIXUpqi9+5yGlRy/t/QRDWE= -github.com/in-toto/in-toto-golang v0.7.1/go.mod h1:m7HiDiYvPz+7SkqU9Tnt9hNgJfA31/nr1GSlDlxrQmE= +github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= +github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/in-toto/in-toto-golang v0.8.0 h1:MTVK138TdSUbScuy3XQiRlV5U5a1UkFdz+2gyvF42V0= +github.com/in-toto/in-toto-golang v0.8.0/go.mod h1:u8GkjDht81AcD7GrNAPLZl4jsRF//f306QDHZ5mBIyI= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -1220,7 +1218,6 @@ github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= 
-github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY= github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -1248,6 +1245,7 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -1379,7 +1377,6 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= @@ -1388,8 +1385,8 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx github.com/mitchellh/reflectwalk v1.0.2/go.mod 
h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mkrautz/goar v0.0.0-20150919110319-282caa8bd9da h1:Iu5QFXIMK/YrHJ0NgUnK0rqYTTyb0ldt/rqNenAj39U= github.com/mkrautz/goar v0.0.0-20150919110319-282caa8bd9da/go.mod h1:NfnmoBY0gGkr3/NmI+DP/UXbZvOCurCUYAzOdYJjlOc= -github.com/moby/buildkit v0.11.0 h1:GqBC/ETDqwdu61g4tCxX1GFZuGWg/nuqFxamb2or1dw= -github.com/moby/buildkit v0.11.0/go.mod h1:v43oa6H2Fx/cdzc7j0UlUu8p6188yy1P3vrujAs99uw= +github.com/moby/buildkit v0.11.4 h1:mleVHr+n7HUD65QNUkgkT3d8muTzhYUoHE9FM3Ej05s= +github.com/moby/buildkit v0.11.4/go.mod h1:P5Qi041LvCfhkfYBHry+Rwoo3Wi6H971J2ggE+PcIoo= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= @@ -1478,8 +1475,8 @@ github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E= -github.com/open-policy-agent/opa v0.51.0 h1:2hS5xhos8HtkN+mgpqMhNJSFtn/1n/h3wh+AeTPJg6Q= -github.com/open-policy-agent/opa v0.51.0/go.mod h1:OjmwLfXdeR7skSxrt8Yd3ScXTqPxyJn7GeTRJrcEerU= +github.com/open-policy-agent/opa v0.53.0 h1:zC/0sI+Gof5/oiFNS3DmoJa11D0m0InZeDhZyzi+l6E= +github.com/open-policy-agent/opa v0.53.0/go.mod h1:j3wl8FqSz/+u33Scl72Ms2wxkZx4yZPdqSCrOqBqdsA= github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.75.0 h1:XW4DBJP3+dgdclPVA7d9aetG/FBUmwSNQGWaWoZnyo0= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.75.0 h1:YUku2qImuCj85X7LNGjToa3X1hJUd3VIBGVVbikGv08= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.75.0/go.mod 
h1:Zx+/9iSrxOX8yI5pq1yQjLkTW/mQRs8kw4t0w4Ro820= @@ -1507,8 +1504,8 @@ github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.m github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.1.0-rc.1 h1:wHa9jroFfKGQqFHj0I1fMRKLl0pfj+ynAqBxo3v6u9w= -github.com/opencontainers/runtime-spec v1.1.0-rc.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.1.0-rc.2 h1:ucBtEms2tamYYW/SvGpvq9yUN0NEVL6oyLEwDcTSrk8= +github.com/opencontainers/runtime-spec v1.1.0-rc.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= @@ -1547,8 +1544,8 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= -github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= +github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= 
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -1584,8 +1581,8 @@ github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM= -github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1619,8 +1616,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= 
+github.com/prometheus/procfs v0.10.0 h1:UkG7GPYkO4UZyLnyXjaWYcgOSONqwdBqFUT95ugmt6I= +github.com/prometheus/procfs v0.10.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= @@ -1643,9 +1640,11 @@ github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo= -github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rs/cors v1.9.0 h1:l9HGsTsHJcvW14Nk7J9KFz8bzeAWXn3CG6bgt7LsrAE= +github.com/rs/cors v1.9.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w= @@ -1660,8 +1659,8 @@ github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIH github.com/safchain/baloum v0.0.0-20221229104256-b1fc8f70a86b h1:cTiH46CYvPhgOlE0t82N+rgQw44b7vB39ay+P+wiVz8= 
github.com/safchain/baloum v0.0.0-20221229104256-b1fc8f70a86b/go.mod h1:1+GWOH32bsIEAHknYja6/H1efcDs+/Q2XrtYMM200Ho= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/samber/lo v1.37.0 h1:XjVcB8g6tgUp8rsPsJ2CvhClfImrpL04YpQHXeHPhRw= -github.com/samber/lo v1.37.0/go.mod h1:9vaz2O4o8oOnK23pd2TrXufcbdbJIa3b6cstBWKpopA= +github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM= +github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sassoftware/go-rpmutils v0.2.0 h1:pKW0HDYMFWQ5b4JQPiI3WI12hGsVoW0V8+GMoZiI/JE= @@ -1679,7 +1678,6 @@ github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= -github.com/shirou/gopsutil/v3 v3.22.10/go.mod h1:QNza6r4YQoydyCfo6rH0blGfKahgibh4dQmV5xdFkQk= github.com/shirou/gopsutil/v3 v3.23.2 h1:PAWSuiAszn7IhPMBtXsbSCafej7PqUOvY6YywlQUExU= github.com/shirou/gopsutil/v3 v3.23.2/go.mod h1:gv0aQw33GLo3pG8SiWKiQrbDzbRY1K80RyZJ7V4Th1M= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+DfgoHVedieIzIXE8uylPue0U= @@ -1687,8 +1685,8 @@ github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod 
h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sigstore/rekor v1.0.1 h1:rcESXSNkAPRWFYZel9rarspdvneET60F2ngNkadi89c= -github.com/sigstore/rekor v1.0.1/go.mod h1:ecTKdZWGWqE1pl3U1m1JebQJLU/hSjD9vYHOmHQ7w4g= +github.com/sigstore/rekor v1.1.1 h1:JCeSss+qUHnCATmwAZh4zT9k0Frdyq0BjmRwewSfEy4= +github.com/sigstore/rekor v1.1.1/go.mod h1:x/xK+HK08MiuJv+v4OxY/Oo3bhuz1DtJXNJrV7hrzvs= github.com/sijms/go-ora/v2 v2.7.6 h1:QyR1CKFxG+VVk2+LdHoHF4NxDSvcQ3deBXtZCrahSq4= github.com/sijms/go-ora/v2 v2.7.6/go.mod h1:EHxlY6x7y9HAsdfumurRfTd+v8NrEOTR3Xl4FWlH6xk= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -1700,8 +1698,8 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y= +github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skydive-project/go-debouncer v1.0.0 h1:cqU19PyN7WXsnSlMTANvnHws6lGcbVOH2aDQzwe6qbk= github.com/skydive-project/go-debouncer v1.0.0/go.mod h1:7pK+5HBlYCD8W2cXhvMRsMsdWelDEPfpbE6PwSlDX68= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -1716,8 +1714,9 @@ github.com/smira/flag v0.0.0-20170926215700-695ea5e84e76/go.mod h1:KQ5bP0mZypI2q github.com/smira/go-aws-auth v0.0.0-20180731211914-8b73995fd8d1/go.mod h1:KKhbssKjyR//TUP31t3ksE2b6oeAw328JzwmFJnzRCw= github.com/smira/go-ftp-protocol 
v0.0.0-20140829150050-066b75c2b70d h1:rvtR4+9N2LWPo0UHe6/aHvWpqD9Dhf10P2bfGFht74g= github.com/smira/go-ftp-protocol v0.0.0-20140829150050-066b75c2b70d/go.mod h1:Jm7yHrROA5tC42gyJ5EwiR8EWp0PUy0qOc4sE7Y8Uzo= -github.com/smira/go-xz v0.0.0-20220607140411-c2a07d4bedda h1:WWMF6Bz2r8/91uUs4ZYk10zSSflqHDE5Ot3/s1yz+x4= github.com/smira/go-xz v0.0.0-20220607140411-c2a07d4bedda/go.mod h1:RdN8UkuBr4amSnXBHKWkn6p1mXqYjHw+Yvxz8gQfU5A= +github.com/smira/go-xz v0.1.0 h1:1zVLT1sITUKcWNysfHMLZWJ2Yh7yJfhREsgmUdK4zb0= +github.com/smira/go-xz v0.1.0/go.mod h1:OmdEWnIIkuLzRLHGF4YtjDzF9VFUevEcP6YxDPRqVrs= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= @@ -1775,8 +1774,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1801,12 +1801,10 @@ github.com/tidwall/pretty v1.0.0/go.mod 
h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= -github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= -github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= +github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= -github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1951,7 +1949,6 @@ go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20220522111935-c3bc4116dcd1 h1:S801WV go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20220522111935-c3bc4116dcd1/go.mod h1:sw82kLjlBpuhowfKyi54jk2s8qK8W4YG5EwlY/BleOY= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mongodb.org/mongo-driver v1.11.4 h1:4ayjakA013OdpGyL2K3ZqylTac/rMjrJOMZ1EHizXas= 
go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= @@ -1982,16 +1979,16 @@ go.opentelemetry.io/collector/exporter/otlpexporter v0.75.0/go.mod h1:Vl7krXCRPY go.opentelemetry.io/collector/extension/zpagesextension v0.75.0 h1:QHqkG2oEOSP3npFH8sTw4XQh9A138q+fVzAUw6YFNFw= go.opentelemetry.io/collector/featuregate v0.75.0 h1:543kdhXh7/dHTwpHsjv+lgIz73RJD2lCkLrFi4UjZjk= go.opentelemetry.io/collector/featuregate v0.75.0/go.mod h1:pmVMr98Ps6QKyEHiVPN7o3Qd8K//M2NapfOv5BMWvA0= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0011 h1:7lT0vseP89mHtUpvgmWYRvQZ0eY+SHbVsnXY20xkoMg= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0011/go.mod h1:9vrXSQBeMRrdfGt9oMgYweqERJ8adaiQjN6LSbqRMMA= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0012 h1:R+cfEUMyLn9Q1QknyQ4QU77pbfc1aJKYEXFHtnwSbCg= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0012/go.mod h1:rEAKFqc1L03lidKtra/2/dJtI0Hp+JsQxuPEIkj/2Vg= go.opentelemetry.io/collector/processor/batchprocessor v0.75.0 h1:eMRde/kjWdgpBKFWRS7Z1sfv20IvapdDMyOAV/I1ZZ8= go.opentelemetry.io/collector/processor/batchprocessor v0.75.0/go.mod h1:QK6WYbCI1rkX08AliBnWKiWkUhy8d08pG3n02GKw2lY= go.opentelemetry.io/collector/receiver v0.75.0 h1:ZgoShBSTprt7vExTLtXTmEH05qIHU3tORhBWyk0PuB4= go.opentelemetry.io/collector/receiver v0.75.0/go.mod h1:MADsPYeztg9cGUZIjmv5ayzntt69blxfmmZHlgdM1Aw= go.opentelemetry.io/collector/receiver/otlpreceiver v0.75.0 h1:VXOt3k/zB/R2SxFNJQTDWm/KifCMW60m8Q3O5E2TV2c= go.opentelemetry.io/collector/receiver/otlpreceiver v0.75.0/go.mod h1:DjuZIm8UHX5i3YEQFPi0XWbdNOSwFO9Xnm+WaB5wXzc= -go.opentelemetry.io/collector/semconv v0.75.0 h1:zIlZk+zh1bgc3VKE1PZEmhOaVa4tQHZMcFFUXmGekVs= -go.opentelemetry.io/collector/semconv v0.75.0/go.mod h1:xt8oDOiwa1jy24tGUo8+SzpphI7ZredS2WM/0m8rtTA= +go.opentelemetry.io/collector/semconv v0.78.1 h1:YlhokDVTP+gw6yKA0Jc2FcfddhD+a6E5Ixmby5xBWs0= +go.opentelemetry.io/collector/semconv v0.78.1/go.mod h1:lazBA42nqZPNPWDMiqWfr5eIVeNgRmoLDbQmjXKcm70= go.opentelemetry.io/contrib 
v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= @@ -2094,7 +2091,6 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= @@ -2106,8 +2102,10 @@ golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= 
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -2211,15 +2209,18 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220923203811-8be639271d50/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= @@ -2241,8 +2242,8 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2374,12 +2375,12 @@ golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2392,6 +2393,7 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= @@ -2402,6 +2404,7 @@ golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod 
h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= @@ -2418,6 +2421,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= @@ -2492,7 +2496,6 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -2508,6 +2511,7 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= @@ -2544,8 +2548,8 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= -google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.119.0 h1:Dzq+ARD6+8jmd5wknJE1crpuzu1JiovEU6gCp9PkoKA= +google.golang.org/api v0.119.0/go.mod h1:CrSvlNEFCFLae9ZUtL1z+61+rEBD7J/aCYwVYKZoWFU= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2609,8 +2613,8 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 h1:khxVcsk/FhnzxMKOyD+TDGwjbEOpcPuIpmafPGFmhMA= 
-google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -2641,8 +2645,9 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= +google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a h1:p51n6zkL483uumoZhCSGtHCem9kDeU05G5jX/wYI9gw= google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a/go.mod h1:gxndsbNG1n4TZcHGgsYEfVGnTxqfEdfiDv6/DADXX9o= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2712,7 +2717,6 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 
v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/zorkian/go-datadog-api.v2 v2.30.0 h1:umQdVO0Ytx+kYadhuJNjFtDgIsIEBnKrOTvNuu8ClKI= diff --git a/internal/tools/proto/go.mod b/internal/tools/proto/go.mod index ee2391e48e0ac..1785111b4814f 100644 --- a/internal/tools/proto/go.mod +++ b/internal/tools/proto/go.mod @@ -3,6 +3,7 @@ module github.com/DataDog/datadog-agent/internal/tools/proto go 1.18 require ( + github.com/favadi/protoc-go-inject-tag v1.4.0 github.com/golang/mock v1.5.0 github.com/golang/protobuf v1.5.2 github.com/grpc-ecosystem/grpc-gateway v1.12.2 @@ -21,6 +22,6 @@ require ( golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c // indirect - google.golang.org/protobuf v1.26.0 // indirect + google.golang.org/protobuf v1.28.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/internal/tools/proto/go.sum b/internal/tools/proto/go.sum index dbcac02d8b4c1..439cd0cdc9d30 100644 --- a/internal/tools/proto/go.sum +++ b/internal/tools/proto/go.sum @@ -2,6 +2,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/favadi/protoc-go-inject-tag v1.4.0 h1:K3KXxbgRw5WT4f43LbglARGz/8jVsDOS7uMjG4oNvXY= 
+github.com/favadi/protoc-go-inject-tag v1.4.0/go.mod h1:AZ+PK+QDKUOLlBRG0rYiKkUX5Hw7+7GTFzlU99GFSbQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= @@ -78,8 +80,9 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/internal/tools/proto/tools.go b/internal/tools/proto/tools.go index 7150f4e76259f..985266391df97 100644 --- a/internal/tools/proto/tools.go +++ b/internal/tools/proto/tools.go @@ -16,6 +16,7 @@ package proto // conflicts with the pins set here. 
import ( + _ "github.com/favadi/protoc-go-inject-tag" _ "github.com/golang/mock/mockgen" _ "github.com/golang/protobuf/protoc-gen-go" _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway" diff --git a/omnibus/config/patches/openscap/0042-Plug-a-memory-leak.patch b/omnibus/config/patches/openscap/0042-Plug-a-memory-leak.patch new file mode 100644 index 0000000000000..48a882f402269 --- /dev/null +++ b/omnibus/config/patches/openscap/0042-Plug-a-memory-leak.patch @@ -0,0 +1,61 @@ +From 1c2863cde0481d77b7f45f90e48db5ce1497372a Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20=C4=8Cern=C3=BD?= +Date: Tue, 9 May 2023 16:43:54 +0200 +Subject: [PATCH 42/43] Plug a memory leak + +When there already exists a value under the given key in the +hash table, oscap_htable_add doesn't put the value to the hash table +and therefore the value isn't freed when the hash table is freed. +The caller of oscap_htable_add needs to check if oscap_htable_add +failed and in this situation is responsible to free the value. 
+ +Addressing: + +oscap xccdf eval --profile '(all)' --rule xccdf_org.ssgproject.content_rule_accounts_tmout /usr/share/xml/scap/ssg/content/ssg-fedora-ds.xml +--- Starting Evaluation --- + +Title Set Interactive Session Timeout +Rule xccdf_org.ssgproject.content_rule_accounts_tmout +Result fail + +================================================================= +==85219==ERROR: LeakSanitizer: detected memory leaks + +Direct leak of 49 byte(s) in 1 object(s) allocated from: + #0 0x4a3198 in strdup (/home/jcerny/work/git/openscap/build/utils/oscap+0x4a3198) (BuildId: 329fd48580c8ee52863c16be406cb9d7c3df95db) + #1 0x7f090491f20c in oscap_strdup /home/jcerny/work/git/openscap/src/common/util.h:312:9 + #2 0x7f090491e9dd in ds_sds_dump_component_ref_as /home/jcerny/work/git/openscap/src/DS/sds.c:510:26 + #3 0x7f090491efce in ds_sds_dump_component_ref_as /home/jcerny/work/git/openscap/src/DS/sds.c:574:8 + #4 0x7f090491f7d3 in ds_sds_dump_component_ref /home/jcerny/work/git/openscap/src/DS/sds.c:601:15 + #5 0x7f0904917305 in ds_sds_session_register_component_with_dependencies /home/jcerny/work/git/openscap/src/DS/ds_sds_session.c:327:10 + #6 0x7f0904a0493c in xccdf_session_load_cpe /home/jcerny/work/git/openscap/src/XCCDF/xccdf_session.c:921:8 + #7 0x7f0904a03dc7 in xccdf_session_load /home/jcerny/work/git/openscap/src/XCCDF/xccdf_session.c:705:14 + #8 0x53333f in app_evaluate_xccdf /home/jcerny/work/git/openscap/utils/oscap-xccdf.c:641:6 + #9 0x52fedb in oscap_module_call /home/jcerny/work/git/openscap/utils/oscap-tool.c:295:10 + #10 0x5307fb in oscap_module_process /home/jcerny/work/git/openscap/utils/oscap-tool.c:389:19 + #11 0x53cee0 in main /home/jcerny/work/git/openscap/utils/oscap.c:88:15 + #12 0x7f090390950f in __libc_start_call_main (/lib64/libc.so.6+0x2750f) (BuildId: 81daba31ee66dbd63efdc4252a872949d874d136) + +SUMMARY: AddressSanitizer: 49 byte(s) leaked in 1 allocation(s). 
+--- + src/DS/sds.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/src/DS/sds.c b/src/DS/sds.c +index c82638962..c640c5452 100644 +--- a/src/DS/sds.c ++++ b/src/DS/sds.c +@@ -509,7 +509,9 @@ int ds_sds_dump_component_ref_as(const xmlNodePtr component_ref, struct ds_sds_s + // make a copy of xlink_href because ds_sds_dump_component_by_href modifies its second argument + char *xlink_href_copy = oscap_strdup(xlink_href); + int ret = ds_sds_dump_component_by_href(session, xlink_href, target_filename_dirname, relative_filepath, cref_id, &component_id); +- oscap_htable_add(ds_sds_session_get_component_uris(session), cref_id, xlink_href_copy); ++ if (!oscap_htable_add(ds_sds_session_get_component_uris(session), cref_id, xlink_href_copy)) { ++ free(xlink_href_copy); ++ } + + xmlFree(xlink_href); + xmlFree(cref_id); +-- +2.34.1 + diff --git a/omnibus/config/patches/openscap/0043-Fix-other-occurences-of-oscap_htable_add.patch b/omnibus/config/patches/openscap/0043-Fix-other-occurences-of-oscap_htable_add.patch new file mode 100644 index 0000000000000..eb587510da0b3 --- /dev/null +++ b/omnibus/config/patches/openscap/0043-Fix-other-occurences-of-oscap_htable_add.patch @@ -0,0 +1,50 @@ +From b4ada9f12ebcc778dff5a63cbdf594c22cbb75f5 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20=C4=8Cern=C3=BD?= +Date: Tue, 16 May 2023 14:48:20 +0200 +Subject: [PATCH 43/43] Fix other occurences of oscap_htable_add + +If the hash table owns its elements, its responsible for freeing +them. The callers of oscap_htable_add rely on the fact that the +elements will be later freed by oscap_htable_free. However, if +oscap_htable_add fails to insert the elements to the table, +then the caller needs to free them. 
+--- + src/DS/rds.c | 5 ++++- + src/OVAL/oval_generator.c | 5 ++++- + 2 files changed, 8 insertions(+), 2 deletions(-) + +diff --git a/src/DS/rds.c b/src/DS/rds.c +index 5ec98daa2..d2553b2db 100644 +--- a/src/DS/rds.c ++++ b/src/DS/rds.c +@@ -888,7 +888,10 @@ int ds_rds_create(const char* sds_file, const char* xccdf_result_file, const cha + result = -1; + oscap_source_free(oval_source); + } else { +- oscap_htable_add(oval_result_sources, *oval_result_files, oval_source); ++ if (!oscap_htable_add(oval_result_sources, *oval_result_files, oval_source)) { ++ result = -1; ++ oscap_source_free(oval_source); ++ } + } + oval_result_files++; + } +diff --git a/src/OVAL/oval_generator.c b/src/OVAL/oval_generator.c +index 267f83037..e423a9551 100644 +--- a/src/OVAL/oval_generator.c ++++ b/src/OVAL/oval_generator.c +@@ -171,7 +171,10 @@ void oval_generator_update_timestamp(struct oval_generator *generator) + + void oval_generator_add_platform_schema_version(struct oval_generator *generator, const char *platform, const char *schema_version) + { +- oscap_htable_add(generator->platform_schema_versions, platform, oscap_strdup(schema_version)); ++ char *schema_version_dup = oscap_strdup(schema_version); ++ if (!oscap_htable_add(generator->platform_schema_versions, platform, schema_version_dup)) { ++ free(schema_version_dup); ++ } + } + + +-- +2.34.1 + diff --git a/omnibus/config/patches/rpm/rpmdb-no-create.patch b/omnibus/config/patches/rpm/rpmdb-no-create.patch new file mode 100644 index 0000000000000..5837316ef9589 --- /dev/null +++ b/omnibus/config/patches/rpm/rpmdb-no-create.patch @@ -0,0 +1,25 @@ +--- a/lib/rpmdb.c ++++ b/lib/rpmdb.c +@@ -463,6 +463,11 @@ static int openDatabase(const char * prefix, + if (db == NULL) + return 1; + ++ /* Don't create db if it doesn't exist already */ ++ struct stat st; ++ if (stat(rpmdbHome(db), &st) < 0) ++ return 1; ++ + /* Try to ensure db home exists, error out if we can't even create */ + rc = rpmioMkpath(rpmdbHome(db), 0755, getuid(), 
getgid()); + if (rc == 0) { +--- a/lib/rpmts.c ++++ b/lib/rpmts.c +@@ -104,7 +104,7 @@ int rpmtsOpenDB(rpmts ts, int dbmode) + rc = rpmdbOpen(ts->rootDir, &ts->rdb, ts->dbmode, 0644); + if (rc) { + char * dn = rpmGetPath(ts->rootDir, "%{_dbpath}", NULL); +- rpmlog(RPMLOG_ERR, _("cannot open Packages database in %s\n"), dn); ++ rpmlog(RPMLOG_DEBUG, _("cannot open Packages database in %s\n"), dn); + free(dn); + } + return rc; diff --git a/omnibus/config/projects/agent.rb b/omnibus/config/projects/agent.rb index 3130e0db94f4f..b95bad6b0adcb 100644 --- a/omnibus/config/projects/agent.rb +++ b/omnibus/config/projects/agent.rb @@ -215,6 +215,12 @@ include_sysprobe = "true" additional_sign_files_list << "#{Omnibus::Config.source_dir()}\\datadog-agent\\src\\github.com\\DataDog\\datadog-agent\\bin\\agent\\system-probe.exe" end + + include_apminject = "false" + if not windows_arch_i386? and ENV['WINDOWS_APMINJECT_MODULE'] and not ENV['WINDOWS_APMINJECT_MODULE'].empty? + include_apminject = "true" + end + additional_sign_files additional_sign_files_list parameters({ 'InstallDir' => install_dir, @@ -225,6 +231,7 @@ 'IncludePython3' => "#{with_python_runtime? '3'}", 'Platform' => "#{arch}", 'IncludeSysprobe' => "#{include_sysprobe}", + 'IncludeAPMInject' => "#{include_apminject}" }) # This block runs before harvesting with heat.exe # It runs in the scope of the packager, so all variables access are from the point-of-view of the packager. @@ -279,6 +286,9 @@ def generate_embedded_archive(version) if ENV['WINDOWS_DDNPM_DRIVER'] and not ENV['WINDOWS_DDNPM_DRIVER'].empty? dependency 'datadog-windows-filter-driver' end + if ENV['WINDOWS_APMINJECT_MODULE'] and not ENV['WINDOWS_APMINJECT_MODULE'].empty? + dependency 'datadog-windows-apminject' + end if ENV['WINDOWS_DDPROCMON_DRIVER'] and not ENV['WINDOWS_DDPROCMON_DRIVER'].empty? 
dependency 'datadog-windows-procmon-driver' end diff --git a/omnibus/config/software/datadog-windows-apminject.rb b/omnibus/config/software/datadog-windows-apminject.rb new file mode 100644 index 0000000000000..e43dd065f9dd9 --- /dev/null +++ b/omnibus/config/software/datadog-windows-apminject.rb @@ -0,0 +1,26 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the Apache License Version 2.0. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2016-present Datadog, Inc. + +name "datadog-windows-apminject" +# at this moment,builds are stored by branch name. Will need to correct at some point + + +default_version "master" +# +# this should only ever be included by a windows build. +if ohai["platform"] == "windows" + driverpath = ENV['WINDOWS_APMINJECT_MODULE'] + driverver = ENV['WINDOWS_APMINJECT_VERSION'] + drivermsmsha = ENV['WINDOWS_APMINJECT_SHASUM'] + + source :url => "https://s3.amazonaws.com/dd-windowsfilter/builds/#{driverpath}/ddapminstall-#{driverver}.msm", + :sha256 => "#{drivermsmsha}", + :target_filename => "ddapminstall.msm" + + build do + copy "ddapminstall.msm", "#{Omnibus::Config.source_dir()}/datadog-agent/src/github.com/DataDog/datadog-agent/bin/agent/ddapminstall.msm" + end + +end \ No newline at end of file diff --git a/omnibus/config/software/openscap.rb b/omnibus/config/software/openscap.rb index fc60a57828c0a..e37c206bbd4b6 100644 --- a/omnibus/config/software/openscap.rb +++ b/omnibus/config/software/openscap.rb @@ -52,6 +52,8 @@ patch source: "0036-Fix-leak-of-regex-structure-in-oval_fts-in-error-cas.patch", env: env patch source: "0037-Free-xmlDoc-structure-at-the-end-of-xccdf_session_lo.patch", env: env patch source: "0041-Fix-implicitly-declared-function.patch", env: env + patch source: "0042-Plug-a-memory-leak.patch", env: env + patch source: "0043-Fix-other-occurences-of-oscap_htable_add.patch", env: env patch source: "get_results_from_session.patch", 
env: env # add a function to retrieve results from session patch source: "session_result_free.patch", env: env # add a function to free results from session diff --git a/omnibus/config/software/rpm.rb b/omnibus/config/software/rpm.rb index cc0057d57a043..00da9c9147e09 100644 --- a/omnibus/config/software/rpm.rb +++ b/omnibus/config/software/rpm.rb @@ -49,6 +49,7 @@ env["CFLAGS"] << " -fPIC" patch source: "0001-Include-fcntl.patch", env: env # fix build + patch source: "rpmdb-no-create.patch", env: env # don't create db if it doesn't exist already update_config_guess diff --git a/omnibus/package-scripts/agent-rpm/posttrans b/omnibus/package-scripts/agent-rpm/posttrans index b28850580fed1..373458083b802 100755 --- a/omnibus/package-scripts/agent-rpm/posttrans +++ b/omnibus/package-scripts/agent-rpm/posttrans @@ -80,7 +80,12 @@ if [ ! -f "$CONFIG_DIR/install_info" ]; then if command -v rpm >/dev/null 2>&1; then tool=rpm - tool_version=rpm-$(rpm -q --qf "%{VERSION}" rpm || echo "unknown") + # Omnibus will put this script verbatim inside RPM specfile before building. + # We need to escape the "%" so that the rpm build machinery doesn't expand + # it as a macro (which would expand to the version of Agent being built). + # NOTE: on some distros (e.g. opensuse 15.4), "rpm" package doesn't exist, + # it's called "rpm-ndb". We query version of package which contains /bin/rpm file. 
+ tool_version=rpm-$(rpm -q -f /bin/rpm --queryformat "%%{VERSION}" || echo "unknown") else tool=unknown tool_version=unknown diff --git a/omnibus/package-scripts/agent-rpm/preinst b/omnibus/package-scripts/agent-rpm/preinst index de5f4041b4908..c8bccf324140b 100755 --- a/omnibus/package-scripts/agent-rpm/preinst +++ b/omnibus/package-scripts/agent-rpm/preinst @@ -43,12 +43,15 @@ if [ -f "/etc/init.d/datadog-agent" ]; then fi # Set up `dd-agent` user and group -getent group dd-agent >/dev/null || groupadd -r dd-agent -getent passwd dd-agent >/dev/null || \ - useradd -r -M -g dd-agent -d $INSTALL_DIR -s /sbin/nologin \ - -c "Datadog Agent" dd-agent && \ - { usermod -L dd-agent || echo "[ WARNING ]\tCannot lock the 'dd-agent' user account"; } - +if ! getent group dd-agent >/dev/null; then + groupadd -r dd-agent +fi +if ! getent passwd dd-agent >/dev/null; then + useradd -r -M -g dd-agent -d $INSTALL_DIR -s /sbin/nologin -c "Datadog Agent" dd-agent + if ! usermod -L dd-agent; then + echo "[ WARNING ]\tCannot lock the 'dd-agent' user account" + fi +fi # Starting with 6.10, integrations are also uninstalled on package removal diff --git a/omnibus/package-scripts/dogstatsd-rpm/preinst b/omnibus/package-scripts/dogstatsd-rpm/preinst index 8afc753dbf168..e6c0cd1d1aa51 100755 --- a/omnibus/package-scripts/dogstatsd-rpm/preinst +++ b/omnibus/package-scripts/dogstatsd-rpm/preinst @@ -22,10 +22,14 @@ if [ -f "/lib/systemd/system/$SERVICE_NAME.service" ] || [ -f "/usr/lib/systemd/ fi # Set up `dd-agent` user and group -getent group dd-agent >/dev/null || groupadd -r dd-agent -getent passwd dd-agent >/dev/null || \ - useradd -r -M -g dd-agent -d $INSTALL_DIR -s /sbin/nologin \ - -c "Datadog Agent" dd-agent && \ - { usermod -L dd-agent || echo "[ WARNING ]\tCannot lock the 'dd-agent' user account"; } +if ! getent group dd-agent >/dev/null; then + groupadd -r dd-agent +fi +if ! 
getent passwd dd-agent >/dev/null; then + useradd -r -M -g dd-agent -d $INSTALL_DIR -s /sbin/nologin -c "Datadog Agent" dd-agent + if ! usermod -L dd-agent; then + echo "[ WARNING ]\tCannot lock the 'dd-agent' user account" + fi +fi exit 0 diff --git a/omnibus/package-scripts/iot-agent-rpm/posttrans b/omnibus/package-scripts/iot-agent-rpm/posttrans index 6ac7dd20ba25f..51d2d0402b5d0 100755 --- a/omnibus/package-scripts/iot-agent-rpm/posttrans +++ b/omnibus/package-scripts/iot-agent-rpm/posttrans @@ -29,7 +29,12 @@ if [ ! -f "$CONFIG_DIR/install_info" ]; then if command -v rpm >/dev/null 2>&1; then tool=rpm - tool_version=rpm-$(rpm -q --qf "%{VERSION}" rpm || echo "unknown") + # Omnibus will put this script verbatim inside RPM specfile before building. + # We need to escape the "%" so that the rpm build machinery doesn't expand + # it as a macro (which would expand to the version of Agent being built). + # NOTE: on some distros (e.g. opensuse 15.4), "rpm" package doesn't exist, + # it's called "rpm-ndb". We query version of package which contains /bin/rpm file. + tool_version=rpm-$(rpm -q -f /bin/rpm --queryformat "%%{VERSION}" || echo "unknown") else tool=unknown tool_version=unknown diff --git a/omnibus/package-scripts/iot-agent-rpm/preinst b/omnibus/package-scripts/iot-agent-rpm/preinst index 6354547806b44..48cbc67fb1157 100755 --- a/omnibus/package-scripts/iot-agent-rpm/preinst +++ b/omnibus/package-scripts/iot-agent-rpm/preinst @@ -22,10 +22,14 @@ if [ -f "/lib/systemd/system/$SERVICE_NAME.service" ] || [ -f "/usr/lib/systemd/ fi # Set up `dd-agent` user and group -getent group dd-agent >/dev/null || groupadd -r dd-agent -getent passwd dd-agent >/dev/null || \ - useradd -r -M -g dd-agent -d $INSTALL_DIR -s /sbin/nologin \ - -c "Datadog Agent" dd-agent && \ - { usermod -L dd-agent || echo "[ WARNING ]\tCannot lock the 'dd-agent' user account"; } +if ! getent group dd-agent >/dev/null; then + groupadd -r dd-agent +fi +if ! 
getent passwd dd-agent >/dev/null; then + useradd -r -M -g dd-agent -d $INSTALL_DIR -s /sbin/nologin -c "Datadog Agent" dd-agent + if ! usermod -L dd-agent; then + echo "[ WARNING ]\tCannot lock the 'dd-agent' user account" + fi +fi exit 0 diff --git a/omnibus/resources/agent/msi/localbuild/parameters.wxi b/omnibus/resources/agent/msi/localbuild/parameters.wxi index a2a3597c33f27..68dc56b437b92 100644 --- a/omnibus/resources/agent/msi/localbuild/parameters.wxi +++ b/omnibus/resources/agent/msi/localbuild/parameters.wxi @@ -10,6 +10,7 @@ + diff --git a/omnibus/resources/agent/msi/source.wxs.erb b/omnibus/resources/agent/msi/source.wxs.erb index 6c49eb50f5a6c..b98dfb2f674ef 100644 --- a/omnibus/resources/agent/msi/source.wxs.erb +++ b/omnibus/resources/agent/msi/source.wxs.erb @@ -39,6 +39,8 @@ InstallPrivileges="elevated" Compressed="yes" /> + = 602 AND WindowsBuild >= 9200) AND MsiNTProductType >= 2) OR ((VersionNT64 >= 603 AND WindowsBuild >= 9600) AND MsiNTProductType = 1))]]> + --> + + + + @@ -491,9 +497,14 @@ - - - + + + + + + + + 0 && bytesPerContext > 0 { + cgroupLimit, err := cgroupLimitGetter() + if err != nil { + log.Errorf("dogstatsd context limiter: memory based limit configured, but: %v", err) + } else { + limit = int(memoryRatio*float64(cgroupLimit)) / bytesPerContext + log.Debugf("dogstatsd context limiter: memory limit=%d, ratio=%f, contexts limit=%d", cgroupLimit, memoryRatio, limit) + } + } + + if limit == 0 { + limit = config.Datadog.GetInt("dogstatsd_context_limiter.limit") + log.Debugf("dogstatsd context limiter: using fixed global limit %d", limit) + } + + if pipelineCount > 0 { + limit = limit / pipelineCount + } + + return NewGlobal( + limit, + config.Datadog.GetInt("dogstatsd_context_limiter.entry_timeout"), + config.Datadog.GetString("dogstatsd_context_limiter.key_tag_name"), + config.Datadog.GetStringSlice("dogstatsd_context_limiter.telemetry_tag_names"), + ) +} diff --git a/pkg/aggregator/internal/limiter/config_test.go 
b/pkg/aggregator/internal/limiter/config_test.go new file mode 100644 index 0000000000000..5fee6fc238084 --- /dev/null +++ b/pkg/aggregator/internal/limiter/config_test.go @@ -0,0 +1,44 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package limiter + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/pkg/config" +) + +var mockError = errors.New("mock") + +func TestConfig(t *testing.T) { + m := config.Mock(t) + + // no configuration, disabled by default + l := fromConfig(1, true, func() (uint64, error) { return 0, mockError }) + assert.Nil(t, l) + + // static limit + m.Set("dogstatsd_context_limiter.limit", 500) + l = fromConfig(1, true, func() (uint64, error) { return 0, mockError }) + assert.Equal(t, 500, l.global) + + // fallback to static limit with error + m.Set("dogstatsd_context_limiter.cgroup_memory_ratio", 0.5) + l = fromConfig(1, true, func() (uint64, error) { return 0, mockError }) + assert.Equal(t, 500, l.global) + + // memory based limit + m.Set("dogstatsd_context_limiter.bytes_per_context", 1500) + l = fromConfig(1, true, func() (uint64, error) { return 3_000_000, nil }) + assert.Equal(t, 1000, l.global) + + // non-core agents + l = fromConfig(1, false, func() (uint64, error) { return 3_000_000, nil }) + assert.Nil(t, l) +} diff --git a/pkg/aggregator/internal/limiter/limiter.go b/pkg/aggregator/internal/limiter/limiter.go new file mode 100644 index 0000000000000..fe7396b493f07 --- /dev/null +++ b/pkg/aggregator/internal/limiter/limiter.go @@ -0,0 +1,270 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). 
+// Copyright 2016-present Datadog, Inc. + +package limiter + +import ( + "math" + "strings" + + "github.com/DataDog/datadog-agent/pkg/metrics" + "github.com/DataDog/datadog-agent/pkg/tagset" +) + +type entry struct { + current int // number of contexts currently in aggregator + rejected int // number of rejected samples + + lastExpireCount int // expireCount when seen last + + telemetryTags []string +} + +// Limiter tracks number of contexts based on origin detection metrics +// and rejects samples if the number goes over the limit. +// +// Not thread safe. +type Limiter struct { + keyTagName string + telemetryTagNames []string + limit int + global int // global limit + current int // sum(usage[*].current) + usage map[string]*entry + + // expireCount ensure eventual removal of entries that created an entry, but were never able + // to create contexts due to the global limit. + expireCount int + expireCountInterval int +} + +// New returns a limiter with a per-sender limit. +// +// limit is the maximum number of contexts per sender. If zero or less, the limiter is disabled. +// +// keyTagName is the origin-detection tag name that will be used to identify the senders. Contexts +// that have no tag will be tracked as a single sender and the limit will still be applied. +// +// telemetryTagNames are additional tags that will be copied to the per-sender telemetry. Telemetry +// tags should have the same values for all containers that have the same key tag value and will be +// tracked as a single origin (e.g. if key is pod_name, then kube_namespace and kube_deployment are +// valid telemetry tags, but container_id is not). Only tags from the first sample will be used for +// all telemetry for the given sender. 
+func New(limit int, keyTagName string, telemetryTagNames []string) *Limiter { + if limit <= 0 { + return nil + } + + return newLimiter(limit, math.MaxInt, 0, keyTagName, telemetryTagNames) +} + +// NewGlobal returns a limiter with a global limit which will be equally split between senders +// will be equally distributed between origins. +func NewGlobal(global int, expireCountInterval int, key string, tags []string) *Limiter { + if global <= 0 || global == math.MaxInt { + return nil + } + + return newLimiter(0, global, expireCountInterval, key, tags) +} + +func newLimiter(limit, global int, expireCountInterval int, keyTagName string, telemetryTagNames []string) *Limiter { + // Make sure all names end with a colon, so we don't accidentally match a part of the tag name, only the full name. + // e.g. keyTagName="pod_name" should not match the tag "pod_name_alias:foo" + if !strings.HasSuffix(keyTagName, ":") { + keyTagName += ":" + } + + hasKey := false + telemetryTagNames = append([]string{}, telemetryTagNames...) + for i := range telemetryTagNames { + if !strings.HasSuffix(telemetryTagNames[i], ":") { + telemetryTagNames[i] += ":" + } + hasKey = hasKey || keyTagName == telemetryTagNames[i] + } + + // Make sure key tag is always set on the telemetry metrics. + if !hasKey { + telemetryTagNames = append(telemetryTagNames, keyTagName) + } + + return &Limiter{ + keyTagName: keyTagName, + telemetryTagNames: telemetryTagNames, + limit: limit, + global: global, + usage: map[string]*entry{}, + expireCountInterval: expireCountInterval, + } +} + +// getSenderId finds sender identifier given a set of origin detection tags. +// +// If the key tag is not found, returns empty string. +func (l *Limiter) getSenderId(tags []string) string { + for _, t := range tags { + if strings.HasPrefix(t, l.keyTagName) { + return t + } + } + return "" +} + +// extractTelemetryTags returns a slice of tags that have l.telemetryTagNames prefixes. 
+func (l *Limiter) extractTelemetryTags(src []string) []string { + dst := make([]string, 0, len(l.telemetryTagNames)) + + for _, t := range src { + for _, p := range l.telemetryTagNames { + if strings.HasPrefix(t, p) { + dst = append(dst, t) + } + } + } + + return dst +} + +func (l *Limiter) updateLimit() { + if l.global < math.MaxInt && len(l.usage) > 0 { + l.limit = l.global / len(l.usage) + } +} + +// Track is called for each new context. Returns true if the sample should be accepted, false +// otherwise. +func (l *Limiter) Track(tags []string) bool { + if l == nil { + return true + } + + id := l.getSenderId(tags) + + e := l.usage[id] + if e == nil { + e = &entry{ + telemetryTags: l.extractTelemetryTags(tags), + } + l.usage[id] = e + l.updateLimit() + } + + e.lastExpireCount = l.expireCount + + if e.current >= l.limit || l.current >= l.global { + e.rejected++ + return false + } + + l.current++ + e.current++ + return true +} + +// Remove is called when context is expired to decrement current usage. +func (l *Limiter) Remove(tags []string) { + if l == nil { + return + } + + id := l.getSenderId(tags) + + if e := l.usage[id]; e != nil { + l.current-- + e.current-- + if e.current <= 0 { + delete(l.usage, id) + l.updateLimit() + } + } +} + +// IsOverLimit returns true if the context sender is over the limit and the context should be +// dropped. +func (l *Limiter) IsOverLimit(tags []string) bool { + if l == nil { + return false + } + + if e := l.usage[l.getSenderId(tags)]; e != nil { + return e.current > l.limit + } + + return false +} + +// ExpireEntries is called once per flush cycle to do internal bookkeeping and cleanups. 
+func (l *Limiter) ExpireEntries() { + if l == nil { + return + } + + if l.expireCountInterval >= 0 { + l.expireCount++ + tooOld := l.expireCount - l.expireCountInterval + for id, e := range l.usage { + if e.current == 0 && e.lastExpireCount < tooOld { + delete(l.usage, id) + l.updateLimit() + } + } + } +} + +// SendTelemetry appends limiter metrics to the series sink. +func (l *Limiter) SendTelemetry(timestamp float64, series metrics.SerieSink, hostname string, constTags []string) { + if l == nil { + return + } + + droppedTags := append([]string{}, constTags...) + droppedTags = append(droppedTags, "reason:too_many_contexts") + + series.Append(&metrics.Serie{ + Name: "datadog.agent.aggregator.dogstatsd_context_limiter.num_origins", + Host: hostname, + Tags: tagset.NewCompositeTags(constTags, nil), + MType: metrics.APIGaugeType, + Points: []metrics.Point{{Ts: timestamp, Value: float64(len(l.usage))}}, + }) + + if l.global < math.MaxInt { + series.Append(&metrics.Serie{ + Name: "datadog.agent.aggregator.dogstatsd_context_limiter.global_limit", + Host: hostname, + Tags: tagset.NewCompositeTags(constTags, nil), + MType: metrics.APIGaugeType, + Points: []metrics.Point{{Ts: timestamp, Value: float64(l.global)}}, + }) + } + + for _, e := range l.usage { + series.Append(&metrics.Serie{ + Name: "datadog.agent.aggregator.dogstatsd_context_limiter.limit", + Host: hostname, + Tags: tagset.NewCompositeTags(constTags, e.telemetryTags), + MType: metrics.APIGaugeType, + Points: []metrics.Point{{Ts: timestamp, Value: float64(l.limit)}}, + }) + + series.Append(&metrics.Serie{ + Name: "datadog.agent.aggregator.dogstatsd_context_limiter.current", + Host: hostname, + Tags: tagset.NewCompositeTags(constTags, e.telemetryTags), + MType: metrics.APIGaugeType, + Points: []metrics.Point{{Ts: timestamp, Value: float64(e.current)}}, + }) + + series.Append(&metrics.Serie{ + Name: "datadog.agent.aggregator.dogstatsd_samples_dropped", + Host: hostname, + Tags: tagset.NewCompositeTags(droppedTags, 
e.telemetryTags), + MType: metrics.APICountType, + Points: []metrics.Point{{Ts: timestamp, Value: float64(e.rejected)}}, + }) + e.rejected = 0 + } +} diff --git a/pkg/aggregator/internal/limiter/limiter_test.go b/pkg/aggregator/internal/limiter/limiter_test.go new file mode 100644 index 0000000000000..1943aaf25cb24 --- /dev/null +++ b/pkg/aggregator/internal/limiter/limiter_test.go @@ -0,0 +1,100 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package limiter + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLimiter(t *testing.T) { + l := New(1, "pod", []string{"srv"}) + + // check that: + // - unrelated tags are not used + // - tags without values are not used + // - missing tag maps to a the same identity + + a := assert.New(t) + + a.Equal(l.telemetryTagNames, []string{"srv:", "pod:"}) + + a.True(l.Track([]string{"srv:foo", "cid:1", "pod", "pod:foo"})) + a.False(l.Track([]string{"srv:foo", "cid:2", "pod", "pod:foo"})) + + a.True(l.Track([]string{"srv:foo", "cid:3", "pod", "pod:bar"})) + a.False(l.Track([]string{"srv:foo", "cid:4", "pod", "pod:bar"})) + + a.True(l.Track([]string{"srv:foo", "cid:5", "pod"})) + a.False(l.Track([]string{"srv:foo", "cid:6", "pod"})) + a.False(l.Track([]string{})) + + l.Remove([]string{}) + a.True(l.Track([]string{})) + + l.Remove([]string{"srv:bar", "pod:foo"}) + a.True(l.Track([]string{"srv:bar", "pod:foo"})) + + a.Equal(&entry{ + current: 1, + rejected: 0, + telemetryTags: []string{"srv:bar", "pod:foo"}, + }, l.usage["pod:foo"]) + + l.Remove([]string{"pod:foo"}) + a.Nil(l.usage["pod:foo"]) +} + +func TestGlobal(t *testing.T) { + l := NewGlobal(2, 1, "pod", []string{}) + a := assert.New(t) + + a.True(l.Track([]string{"pod:foo"})) + a.True(l.Track([]string{"pod:foo"})) + 
a.False(l.Track([]string{"pod:foo"})) + a.False(l.Track([]string{"pod:bar"})) // would exceed global limit + + l.Remove([]string{"pod:foo"}) + + a.False(l.Track([]string{"pod:foo"})) // would exceed per-origin limit + + a.True(l.Track([]string{"pod:bar"})) + a.False(l.Track([]string{"pod:bar"})) // would exceed per-origin limit + + l.Remove([]string{"pod:bar"}) // removes origin entry, limit is 2 again + a.True(l.Track([]string{"pod:foo"})) + + // check for division by zero + l.Remove([]string{"pod:foo"}) + l.Remove([]string{"pod:foo"}) + a.Equal(0, len(l.usage)) +} + +func TestExpire(t *testing.T) { + l := NewGlobal(2, 1, "pod", []string{}) + a := assert.New(t) + + foo := []string{"pod:foo"} + bar := []string{"pod:bar"} + + a.True(l.Track(foo)) + a.True(l.Track(foo)) + a.False(l.Track(bar)) // rejected, but allocates limit to bar + + l.ExpireEntries() + + l.Remove(foo) + // maxAge 1 means limit remains reserved for 1 tick after initial sample + a.False(l.Track(foo)) + a.Len(l.usage, 2) + + l.ExpireEntries() + + a.Len(l.usage, 1) + l.Remove([]string{"pod:foo"}) + a.True(l.Track([]string{"pod:foo"})) +} diff --git a/pkg/aggregator/internal/limiter/memory.go b/pkg/aggregator/internal/limiter/memory.go new file mode 100644 index 0000000000000..94322b7d4769e --- /dev/null +++ b/pkg/aggregator/internal/limiter/memory.go @@ -0,0 +1,16 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build !linux + +package limiter + +import ( + "errors" +) + +func getCgroupMemoryLimit() (uint64, error) { + return 0, errors.New("cgroup memory limit is only supported on Linux") +} diff --git a/pkg/aggregator/internal/limiter/memory_linux.go b/pkg/aggregator/internal/limiter/memory_linux.go new file mode 100644 index 0000000000000..d51c3accb28ef --- /dev/null +++ b/pkg/aggregator/internal/limiter/memory_linux.go @@ -0,0 +1,35 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux + +package limiter + +import ( + "errors" + + "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/util/cgroups" +) + +func getCgroupMemoryLimit() (uint64, error) { + selfReader, err := cgroups.NewSelfReader("/proc", config.IsContainerized()) + if err != nil { + return 0, err + } + cgroup := selfReader.GetCgroup(cgroups.SelfCgroupIdentifier) + if cgroup == nil { + return 0, errors.New("cannot get cgroup") + } + var stats cgroups.MemoryStats + if err := cgroup.GetMemoryStats(&stats); err != nil { + return 0, err + } + if stats.Limit == nil || *stats.Limit == 0 { + return 0, errors.New("cannot get cgroup memory limit") + } + + return *stats.Limit, nil +} diff --git a/pkg/aggregator/internal/tags_limiter/tags_limiter.go b/pkg/aggregator/internal/tags_limiter/tags_limiter.go new file mode 100644 index 0000000000000..b47e5f9f6299f --- /dev/null +++ b/pkg/aggregator/internal/tags_limiter/tags_limiter.go @@ -0,0 +1,76 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package tags_limiter + +import ( + "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" + "github.com/DataDog/datadog-agent/pkg/metrics" + "github.com/DataDog/datadog-agent/pkg/tagset" +) + +type entry struct { + count uint64 + tags []string +} + +type Limiter struct { + limit int + dropped map[ckey.TagsKey]*entry +} + +func New(limit int) *Limiter { + if limit <= 0 { + return nil + } + + return &Limiter{ + limit: limit, + dropped: map[ckey.TagsKey]*entry{}, + } +} + +func (l *Limiter) Check(taggerKey ckey.TagsKey, taggerTags, metricTags []string) bool { + if l == nil { + return true + } + + if len(taggerTags)+len(metricTags) > l.limit { + if e, ok := l.dropped[taggerKey]; !ok { + e = &entry{ + count: 1, + tags: taggerTags, + } + l.dropped[taggerKey] = e + } else { + e.count++ + } + + return false + } + + return true +} + +func (l *Limiter) SendTelemetry(timestamp float64, series metrics.SerieSink, hostname string, constTags []string) { + if l == nil { + return + } + + constTags = append([]string{}, constTags...) + constTags = append(constTags, "reason:too_many_tags") + + for _, e := range l.dropped { + series.Append(&metrics.Serie{ + Name: "datadog.agent.aggregator.dogstatsd_samples_dropped", + Host: hostname, + Tags: tagset.NewCompositeTags(constTags, e.tags), + MType: metrics.APICountType, + Points: []metrics.Point{{Ts: timestamp, Value: float64(e.count)}}, + }) + } + + l.dropped = map[ckey.TagsKey]*entry{} +} diff --git a/pkg/aggregator/internal/tags_limiter/tags_limiter_test.go b/pkg/aggregator/internal/tags_limiter/tags_limiter_test.go new file mode 100644 index 0000000000000..3c5115b28f7eb --- /dev/null +++ b/pkg/aggregator/internal/tags_limiter/tags_limiter_test.go @@ -0,0 +1,24 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package tags_limiter + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTagsLimiter(t *testing.T) { + const limit int = 5 + l := New(limit) + for n := 0; n <= 10; n += 2 { + for i := 0; i <= n; i++ { + taggerTags := make([]string, i) + metricTags := make([]string, n-i) + assert.Equal(t, n < limit, l.Check(0, taggerTags, metricTags)) + } + } +} diff --git a/pkg/aggregator/mocksender/mocked_methods.go b/pkg/aggregator/mocksender/mocked_methods.go index ef6a74fce1f9f..4e8c654043399 100644 --- a/pkg/aggregator/mocksender/mocked_methods.go +++ b/pkg/aggregator/mocksender/mocked_methods.go @@ -96,6 +96,10 @@ func (m *MockSender) SetCheckService(service string) { m.Called(service) } +func (m *MockSender) SetNoIndex(noIndex bool) { + m.Called(noIndex) +} + // FinalizeCheckServiceTag enables the sending of check service tag mock call. func (m *MockSender) FinalizeCheckServiceTag() { m.Called() diff --git a/pkg/aggregator/mocksender/mocksender.go b/pkg/aggregator/mocksender/mocksender.go index 93844c830d290..1a00b6e92263f 100644 --- a/pkg/aggregator/mocksender/mocksender.go +++ b/pkg/aggregator/mocksender/mocksender.go @@ -86,6 +86,7 @@ func (m *MockSender) SetupAcceptAll() { m.On("SetCheckCustomTags", mock.AnythingOfType("[]string")).Return() m.On("SetCheckService", mock.AnythingOfType("string")).Return() m.On("FinalizeCheckServiceTag").Return() + m.On("SetNoIndex").Return() m.On("Commit").Return() } diff --git a/pkg/aggregator/sender.go b/pkg/aggregator/sender.go index 6c424864abfaa..d45dc6323a99a 100644 --- a/pkg/aggregator/sender.go +++ b/pkg/aggregator/sender.go @@ -37,6 +37,7 @@ type Sender interface { DisableDefaultHostname(disable bool) SetCheckCustomTags(tags []string) SetCheckService(service string) + SetNoIndex(noIndex bool) FinalizeCheckServiceTag() OrchestratorMetadata(msgs []serializer.ProcessMessageBody, clusterID string, nodeType int) OrchestratorManifest(msgs []serializer.ProcessMessageBody, clusterID string) @@ -65,6 +66,7 @@ 
type checkSender struct { eventPlatformOut chan<- senderEventPlatformEvent checkTags []string service string + noIndex bool } // senderItem knows how the aggregator should handle it @@ -200,6 +202,10 @@ func (s *checkSender) FinalizeCheckServiceTag() { } } +func (s *checkSender) SetNoIndex(noIndex bool) { + s.noIndex = noIndex +} + // Commit commits the metric samples & histogram buckets that were added during a check run // Should be called at the end of every check run func (s *checkSender) Commit() { @@ -248,7 +254,7 @@ func (s *checkSender) sendMetricSample( SampleRate: 1, Timestamp: timeNowNano(), FlushFirstValue: flushFirstValue, - NoIndex: noIndex, + NoIndex: s.noIndex || noIndex, } if hostname == "" && !s.defaultHostnameDisabled { diff --git a/pkg/aggregator/time_sampler.go b/pkg/aggregator/time_sampler.go index a1bf20f875a90..109cde782cc35 100644 --- a/pkg/aggregator/time_sampler.go +++ b/pkg/aggregator/time_sampler.go @@ -9,7 +9,9 @@ import ( "fmt" "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" + "github.com/DataDog/datadog-agent/pkg/aggregator/internal/limiter" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" + "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags_limiter" "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -42,7 +44,7 @@ type TimeSampler struct { } // NewTimeSampler returns a newly initialized TimeSampler -func NewTimeSampler(id TimeSamplerID, interval int64, cache *tags.Store, hostname string) *TimeSampler { +func NewTimeSampler(id TimeSamplerID, interval int64, cache *tags.Store, contextsLimiter *limiter.Limiter, tagsLimiter *tags_limiter.Limiter, hostname string) *TimeSampler { if interval == 0 { interval = bucketSize } @@ -51,7 +53,7 @@ func NewTimeSampler(id TimeSamplerID, interval int64, cache *tags.Store, hostnam s := &TimeSampler{ interval: interval, - contextResolver: newTimestampContextResolver(cache), + 
contextResolver: newTimestampContextResolver(cache, contextsLimiter, tagsLimiter), metricsByTimestamp: map[int64]metrics.ContextMetrics{}, counterLastSampledByContext: map[ckey.ContextKey]float64{}, sketchMap: make(sketchMap), @@ -77,7 +79,11 @@ func (s *TimeSampler) sample(metricSample *metrics.MetricSample, timestamp float } // Keep track of the context - contextKey := s.contextResolver.trackContext(metricSample, timestamp) + contextKey, ok := s.contextResolver.trackContext(metricSample, timestamp) + if !ok { + return + } + bucketStart := s.calculateBucketStart(timestamp) switch metricSample.Mtype { @@ -235,9 +241,7 @@ func (s *TimeSampler) flush(timestamp float64, series metrics.SerieSink, sketche tlmDogstatsdContextsByMtype.Set(float64(count), mtype) } - if config.Datadog.GetBool("telemetry.enabled") && config.Datadog.GetBool("telemetry.dogstatsd_origin") { - s.sendOriginTelemetry(timestamp, series) - } + s.sendTelemetry(timestamp, series) } // flushContextMetrics flushes the contextMetrics inside contextMetricsFlusher, handles its errors, @@ -285,7 +289,11 @@ func (s *TimeSampler) countersSampleZeroValue(timestamp int64, contextMetrics me } } -func (s *TimeSampler) sendOriginTelemetry(timestamp float64, series metrics.SerieSink) { +func (s *TimeSampler) sendTelemetry(timestamp float64, series metrics.SerieSink) { + if !config.Datadog.GetBool("telemetry.enabled") { + return + } + // If multiple samplers are used, this avoids the need to // aggregate the stats agent-side, and allows us to see amount of // tags duplication between shards. 
@@ -293,5 +301,11 @@ func (s *TimeSampler) sendOriginTelemetry(timestamp float64, series metrics.Seri fmt.Sprintf("sampler_id:%d", s.id), } - s.contextResolver.sendOriginTelemetry(timestamp, series, s.hostname, tags) + if config.Datadog.GetBool("telemetry.dogstatsd_origin") { + s.contextResolver.sendOriginTelemetry(timestamp, series, s.hostname, tags) + } + + if config.Datadog.GetBool("telemetry.dogstatsd_limiter") { + s.contextResolver.sendLimiterTelemetry(timestamp, series, s.hostname, tags) + } } diff --git a/pkg/aggregator/time_sampler_test.go b/pkg/aggregator/time_sampler_test.go index 52ef73416de31..5fc0efa723dff 100644 --- a/pkg/aggregator/time_sampler_test.go +++ b/pkg/aggregator/time_sampler_test.go @@ -8,6 +8,7 @@ package aggregator import ( + "fmt" "math" "sort" "testing" @@ -16,7 +17,9 @@ import ( "github.com/stretchr/testify/require" "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" + "github.com/DataDog/datadog-agent/pkg/aggregator/internal/limiter" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" + "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags_limiter" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/opentelemetry-mapping-go/pkg/quantile" @@ -32,7 +35,7 @@ func generateSerieContextKey(serie *metrics.Serie) ckey.ContextKey { } func testTimeSampler() *TimeSampler { - sampler := NewTimeSampler(TimeSamplerID(0), 10, tags.NewStore(false, "test"), "host") + sampler := NewTimeSampler(TimeSamplerID(0), 10, tags.NewStore(false, "test"), nil, nil, "host") return sampler } @@ -513,7 +516,7 @@ func TestBucketSamplingWithSketchAndSeries(t *testing.T) { } func benchmarkTimeSampler(b *testing.B, store *tags.Store) { - sampler := testTimeSampler() + sampler := NewTimeSampler(TimeSamplerID(0), 10, store, nil, nil, "host") sample := metrics.MetricSample{ Name: "my.metric.name", @@ -531,6 +534,39 @@ func BenchmarkTimeSampler(b *testing.B) { benchWithTagsStore(b, 
benchmarkTimeSampler) } +func BenchmarkTimeSamplerWithLimiter(b *testing.B) { + sample1 := metrics.MetricSample{ + Name: "foo", + Value: 1, + Mtype: metrics.GaugeType, + Tags: []string{"foo", "bar"}, + SampleRate: 1, + Timestamp: 12345.0, + } + sample2 := metrics.MetricSample{ + Name: "bar", + Value: 1, + Mtype: metrics.GaugeType, + Tags: []string{"foo", "bar"}, + SampleRate: 1, + Timestamp: 12345.0, + } + + for limit := range []int{0, 1, 2, 3} { + store := tags.NewStore(false, "test") + limiter := limiter.New(limit, "pod", []string{"pod"}) + tagsLimiter := tags_limiter.New(5) + sampler := NewTimeSampler(TimeSamplerID(0), 10, store, limiter, tagsLimiter, "host") + + b.Run(fmt.Sprintf("limit=%d", limit), func(b *testing.B) { + for n := 0; n < b.N; n++ { + sampler.sample(&sample1, 12345.0) + sampler.sample(&sample2, 12345.0) + } + }) + } +} + func flushSerie(sampler *TimeSampler, timestamp float64) (metrics.Series, metrics.SketchSeriesList) { var series metrics.Series var sketches metrics.SketchSeriesList diff --git a/pkg/autodiscovery/integration/config.go b/pkg/autodiscovery/integration/config.go index 718957e4c0e71..df7270c4ca933 100644 --- a/pkg/autodiscovery/integration/config.go +++ b/pkg/autodiscovery/integration/config.go @@ -112,6 +112,7 @@ type CommonInstanceConfig struct { Service string `yaml:"service"` Name string `yaml:"name"` Namespace string `yaml:"namespace"` + NoIndex bool `yaml:"no_index"` } // CommonGlobalConfig holds the reserved fields for the yaml init_config data diff --git a/pkg/autodiscovery/listeners/staticconfig.go b/pkg/autodiscovery/listeners/staticconfig.go new file mode 100644 index 0000000000000..81e83473e11a8 --- /dev/null +++ b/pkg/autodiscovery/listeners/staticconfig.go @@ -0,0 +1,124 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). 
+// Copyright 2023-present Datadog, Inc. + +package listeners + +import ( + "context" + + "github.com/DataDog/datadog-agent/pkg/autodiscovery/integration" + "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/util/containers" +) + +// StaticConfigListener implements a ServiceListener based on static configuration parameters +type StaticConfigListener struct { + newService chan<- Service +} + +// StaticConfigService represents services generated from StaticConfigListener +type StaticConfigService struct { + adIdentifier string +} + +// Make sure StaticConfigService implements the Service interface +var _ Service = &StaticConfigService{} + +func init() { + Register("static config", NewStaticConfigListener) +} + +// NewStaticConfigListener creates a StaticConfigListener +func NewStaticConfigListener(Config) (ServiceListener, error) { + return &StaticConfigListener{}, nil +} + +// Listen starts the goroutine to detect checks based on the config +func (l *StaticConfigListener) Listen(newSvc chan<- Service, delSvc chan<- Service) { + l.newService = newSvc + + go l.createServices() +} + +// Stop has nothing to do in this case +func (l *StaticConfigListener) Stop() { +} + +func (l *StaticConfigListener) createServices() { + for _, staticCheck := range []string{ + "container_image", + "container_lifecycle", + "sbom", + } { + if enabled := config.Datadog.GetBool(staticCheck + ".enabled"); enabled { + l.newService <- &StaticConfigService{adIdentifier: "_" + staticCheck} + } + } +} + +// GetServiceID returns the unique entity name linked to that service +func (s *StaticConfigService) GetServiceID() string { + return s.adIdentifier +} + +// GetTaggerEntity returns the tagger entity +func (s *StaticConfigService) GetTaggerEntity() string { + return "" +} + +// GetADIdentifiers return the single AD identifier for a static config service +func (s *StaticConfigService) GetADIdentifiers(context.Context) ([]string, error) { + return 
[]string{s.adIdentifier}, nil +} + +// GetHosts is not supported +func (s *StaticConfigService) GetHosts(context.Context) (map[string]string, error) { + return nil, ErrNotSupported +} + +// GetPorts returns nil and an error because port is not supported in this listener +func (s *StaticConfigService) GetPorts(context.Context) ([]ContainerPort, error) { + return nil, ErrNotSupported +} + +// GetTags retrieves a container's tags +func (s *StaticConfigService) GetTags() ([]string, error) { + return nil, nil +} + +// GetPid inspect the container and return its pid +// Not relevant in this listener +func (s *StaticConfigService) GetPid(context.Context) (int, error) { + return -1, ErrNotSupported +} + +// GetHostname returns nil and an error because port is not supported in this listener +func (s *StaticConfigService) GetHostname(context.Context) (string, error) { + return "", ErrNotSupported +} + +// IsReady is always true +func (s *StaticConfigService) IsReady(context.Context) bool { + return true +} + +// GetCheckNames is not supported +func (s *StaticConfigService) GetCheckNames(context.Context) []string { + return nil +} + +// HasFilter is not supported +func (s *StaticConfigService) HasFilter(filter containers.FilterType) bool { + return false +} + +// GetExtraConfig is not supported +func (s *StaticConfigService) GetExtraConfig(key string) (string, error) { + return "", ErrNotSupported +} + +// FilterTemplates does nothing. 
+func (s *StaticConfigService) FilterTemplates(configs map[string]integration.Config) { +} diff --git a/pkg/cli/subcommands/check/command.go b/pkg/cli/subcommands/check/command.go index e08def7d0cc50..95803fc7dc63c 100644 --- a/pkg/cli/subcommands/check/command.go +++ b/pkg/cli/subcommands/check/command.go @@ -182,8 +182,8 @@ func run(log log.Component, config config.Component, sysprobeconfig sysprobeconf // Always disable SBOM collection in `check` command to avoid BoltDB flock issue // and consuming CPU & Memory for asynchronous scans that would not be shown in `agent check` output. - pkgconfig.Datadog.Set("sbom.enabled", "false") - pkgconfig.Datadog.Set("container_image_collection.sbom.enabled", "false") + pkgconfig.Datadog.Set("sbom.host.enabled", "false") + pkgconfig.Datadog.Set("sbom.container_image.enabled", "false") pkgconfig.Datadog.Set("runtime_security_config.sbom.enabled", "false") hostnameDetected, err := hostname.Get(context.TODO()) diff --git a/pkg/clusteragent/admission/api_discovery.go b/pkg/clusteragent/admission/api_discovery.go index 50455923ec2d5..ef376755a4fcf 100644 --- a/pkg/clusteragent/admission/api_discovery.go +++ b/pkg/clusteragent/admission/api_discovery.go @@ -8,134 +8,28 @@ package admission import ( - "context" - "time" - - "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/retry" - - v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/api/admissionregistration/v1beta1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/DataDog/datadog-agent/pkg/errors" + "k8s.io/client-go/discovery" ) -const ( - success = tryResult(iota) - notSupported - unknown -) - -type tryResult uint8 - -// apiDiscovery is a local struct adapted to the agent retry package. -// It allow discovering the Admissionregistration group versions with a retrier. 
-type apiDiscovery struct { - v1retrier retry.Retrier - v1beta1retrier retry.Retrier - v1Lister func(ctx context.Context, opts metav1.ListOptions) (*v1.MutatingWebhookConfigurationList, error) - v1beta1Lister func(ctx context.Context, opts metav1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error) -} - -func (a *apiDiscovery) tryV1() error { - _, err := a.v1Lister(context.TODO(), metav1.ListOptions{}) - return err -} - -func (a *apiDiscovery) tryV1beta1() error { - _, err := a.v1beta1Lister(context.TODO(), metav1.ListOptions{}) - return err -} - -func newAPIDiscovery(ctx ControllerContext, retryCount uint, retryDelay time.Duration) (*apiDiscovery, error) { - discovery := &apiDiscovery{ - v1Lister: ctx.Client.AdmissionregistrationV1().MutatingWebhookConfigurations().List, - v1beta1Lister: ctx.Client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().List, - } - - if err := discovery.v1retrier.SetupRetrier(&retry.Config{ - Name: "AdmissionV1Discovery", - AttemptMethod: discovery.tryV1, - Strategy: retry.RetryCount, - RetryCount: retryCount, - RetryDelay: retryDelay, - }); err != nil { - return nil, err - } - - if err := discovery.v1beta1retrier.SetupRetrier(&retry.Config{ - Name: "AdmissionV1beta1Discovery", - AttemptMethod: discovery.tryV1beta1, - Strategy: retry.RetryCount, - RetryCount: retryCount, - RetryDelay: retryDelay, - }); err != nil { - return nil, err - } - - return discovery, nil -} - -func errToResult(err error) tryResult { - if err == nil { - return success - } - - if apierrors.IsNotFound(err) { - return notSupported - } - - return unknown -} - -func try(r *retry.Retrier, groupVersion string) tryResult { - log.Debugf("Trying Group version %q", groupVersion) - for { - _ = r.TriggerRetry() - switch r.RetryStatus() { - case retry.OK: - return success - case retry.PermaFail: - err := r.LastError() - log.Infof("Stopped retrying %q, last err: %v", groupVersion, err) - return errToResult(err) - } - } -} - -// useAdmissionV1 discovers 
which admissionregistration version should be used between v1beta1 and v1. -// - It tries to list v1 objects -// - If it succeed, fast return true -// - If it fails, it retries 3 times then tries v1beta1 -// - If v1beta1 succeed, return false -// - If both versions can't be reached, fallback to v1 -// - It fallback to v1beta1 only when v1 is explicitly not supported (got not found error) -func useAdmissionV1(ctx ControllerContext) (bool, error) { - discovery, err := newAPIDiscovery(ctx, 3, 1*time.Second) +// UseAdmissionV1 discovers which admissionregistration version should be used between v1beta1 and v1. +func UseAdmissionV1(client discovery.DiscoveryInterface) (bool, error) { + groups, err := client.ServerGroups() if err != nil { return false, err } - resultV1 := try(&discovery.v1retrier, "admissionregistration.k8s.io/v1") - if resultV1 == success { - log.Info("Group version 'admissionregistration.k8s.io/v1' is available, using it") - return true, nil - } - - resultV1beta1 := try(&discovery.v1beta1retrier, "admissionregistration.k8s.io/v1beta1") - if resultV1beta1 == success { - log.Info("Group version 'admissionregistration.k8s.io/v1beta' is available, using it") - return false, nil - } - - if resultV1 == notSupported && resultV1beta1 == unknown { - // The only case where we want to fallback to v1beta1 is when v1 is explicitly not supported - log.Info("Group version 'admissionregistration.k8s.io/v1' is not supported, falling back to 'v1beta'") - return false, nil + admission := "admissionregistration.k8s.io" + for _, group := range groups.Groups { + if group.Name == admission { + for _, version := range group.Versions { + if version.Version == "v1" { + return true, nil + } + } + return false, nil + } } - // In case of no success in both versions, fallback to the newest (v1) - log.Info("Falling back to 'admissionregistration.k8s.io/v1'") - - return true, nil + return false, errors.NewNotFound(admission) } diff --git 
a/pkg/clusteragent/admission/common/lib_config.go b/pkg/clusteragent/admission/common/lib_config.go index e854b26c8bb4a..255d5802c4849 100644 --- a/pkg/clusteragent/admission/common/lib_config.go +++ b/pkg/clusteragent/admission/common/lib_config.go @@ -40,6 +40,7 @@ type LibConfig struct { TracingMethods []string `yaml:"tracing_methods" json:"tracing_methods,omitempty"` TracingPropagationStyleInject []string `yaml:"tracing_propagation_style_inject" json:"tracing_propagation_style_inject,omitempty"` TracingPropagationStyleExtract []string `yaml:"tracing_propagation_style_extract" json:"tracing_propagation_style_extract,omitempty"` + DataStreams *bool `yaml:"data_streams_enabled" json:"data_streams_enabled,omitempty"` } // TracingServiceMapEntry holds service mapping config @@ -173,6 +174,12 @@ func (lc LibConfig) ToEnvs() []corev1.EnvVar { Value: strings.Join(lc.TracingPropagationStyleExtract, ","), }) } + if val, defined := checkFormatVal(lc.DataStreams); defined { + envs = append(envs, corev1.EnvVar{ + Name: "DD_DATA_STREAMS_ENABLED", + Value: val, + }) + } return envs } diff --git a/pkg/clusteragent/admission/common/lib_config_test.go b/pkg/clusteragent/admission/common/lib_config_test.go index 1566f82b72545..98730f638f34e 100644 --- a/pkg/clusteragent/admission/common/lib_config_test.go +++ b/pkg/clusteragent/admission/common/lib_config_test.go @@ -32,6 +32,7 @@ func TestLibConfig_ToEnvs(t *testing.T) { TracingHeaderTags []TracingHeaderTagEntry TracingPartialFlushMinSpans *int TracingDebug *bool + DataStreams *bool TracingLogLevel *string TracingMethods []string TracingPropagationStyleInject []string @@ -75,6 +76,7 @@ func TestLibConfig_ToEnvs(t *testing.T) { }, TracingPartialFlushMinSpans: pointer.Ptr(100), TracingDebug: pointer.Ptr(true), + DataStreams: pointer.Ptr(true), TracingLogLevel: pointer.Ptr("DEBUG"), TracingMethods: []string{"modA.method", "modB.method"}, TracingPropagationStyleInject: []string{"Datadog", "B3", "W3C"}, @@ -153,6 +155,10 @@ func 
TestLibConfig_ToEnvs(t *testing.T) { Name: "DD_PROPAGATION_STYLE_EXTRACT", Value: "W3C,B3,Datadog", }, + { + Name: "DD_DATA_STREAMS_ENABLED", + Value: "true", + }, }, }, { @@ -196,6 +202,7 @@ func TestLibConfig_ToEnvs(t *testing.T) { TracingHeaderTags: tt.fields.TracingHeaderTags, TracingPartialFlushMinSpans: tt.fields.TracingPartialFlushMinSpans, TracingDebug: tt.fields.TracingDebug, + DataStreams: tt.fields.DataStreams, TracingLogLevel: tt.fields.TracingLogLevel, TracingMethods: tt.fields.TracingMethods, TracingPropagationStyleInject: tt.fields.TracingPropagationStyleInject, diff --git a/pkg/clusteragent/admission/mutate/auto_instrumentation.go b/pkg/clusteragent/admission/mutate/auto_instrumentation.go index 4ec53e42b987e..c166eb9a44fd8 100644 --- a/pkg/clusteragent/admission/mutate/auto_instrumentation.go +++ b/pkg/clusteragent/admission/mutate/auto_instrumentation.go @@ -319,6 +319,13 @@ func injectAutoInstruConfig(pod *corev1.Pod, libsToInject []libInfo) error { } } + // try to inject all if the annotation is set + if err := injectLibConfig(pod, "all"); err != nil { + metrics.LibInjectionErrors.Inc("all") + lastError = err + log.Errorf("Cannot inject library configuration into pod %s: %s", podString(pod), err) + } + injectLibVolume(pod) return lastError @@ -373,7 +380,7 @@ func initResources() (corev1.ResourceRequirements, bool, error) { return resources, hasResources, nil } -// injectLibRequirements injects the minimal config requirements to enable instrumentation +// injectLibRequirements injects the minimal config requirements (env vars and volume mounts) to enable instrumentation func injectLibRequirements(pod *corev1.Pod, ctrName string, envVars []envVar) error { for i, ctr := range pod.Spec.Containers { if ctrName != "" && ctrName != ctr.Name { @@ -416,7 +423,7 @@ func injectLibConfig(pod *corev1.Pod, lang language) error { configAnnotKey := fmt.Sprintf(common.LibConfigV1AnnotKeyFormat, lang) confString, found := pod.GetAnnotations()[configAnnotKey] if 
!found { - log.Debugf("Config annotation key %q not found on pod %s, skipping config injection", configAnnotKey, podString(pod)) + log.Tracef("Config annotation key %q not found on pod %s, skipping config injection", configAnnotKey, podString(pod)) return nil } log.Infof("Config annotation key %q found on pod %s, config: %q", configAnnotKey, podString(pod), confString) diff --git a/pkg/clusteragent/admission/mutate/auto_instrumentation_test.go b/pkg/clusteragent/admission/mutate/auto_instrumentation_test.go index c5b6f20adeafe..fabd000e80466 100644 --- a/pkg/clusteragent/admission/mutate/auto_instrumentation_test.go +++ b/pkg/clusteragent/admission/mutate/auto_instrumentation_test.go @@ -15,6 +15,7 @@ import ( "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/dynamic/fake" ) func TestInjectAutoInstruConfig(t *testing.T) { @@ -505,6 +506,22 @@ func TestInjectLibConfig(t *testing.T) { }, }, }, + { + name: "inject all case", + pod: fakePodWithAnnotation("admission.datadoghq.com/all-lib.config.v1", `{"version":1,"service_language":"all","runtime_metrics_enabled":true,"tracing_rate_limit":50}`), + lang: "all", + wantErr: false, + expectedEnvs: []corev1.EnvVar{ + { + Name: "DD_RUNTIME_METRICS_ENABLED", + Value: "true", + }, + { + Name: "DD_TRACE_RATE_LIMIT", + Value: "50", + }, + }, + }, { name: "invalid json", pod: fakePodWithAnnotation("admission.datadoghq.com/java-lib.config.v1", "invalid"), @@ -667,3 +684,329 @@ func TestInjectAll(t *testing.T) { }) } } + +func TestInjectAutoInstrumentation(t *testing.T) { + tests := []struct { + name string + pod *corev1.Pod + expectedEnvs []corev1.EnvVar + wantErr bool + }{ + { + name: "inject all", + pod: fakePodWithAnnotations(map[string]string{ + "admission.datadoghq.com/all-lib.version": "latest", + "admission.datadoghq.com/all-lib.config.v1": `{"version":1,"runtime_metrics_enabled":true,"tracing_rate_limit":50,"tracing_sampling_rate":0.3}`, + }), 
+ expectedEnvs: []corev1.EnvVar{ + { + Name: "DD_RUNTIME_METRICS_ENABLED", + Value: "true", + }, + { + Name: "DD_TRACE_RATE_LIMIT", + Value: "50", + }, + { + Name: "DD_TRACE_SAMPLE_RATE", + Value: "0.30", + }, + { + Name: "PYTHONPATH", + Value: "/datadog-lib/", + }, + { + Name: "RUBYOPT", + Value: " -r/datadog-lib/auto_inject", + }, + { + Name: "NODE_OPTIONS", + Value: " --require=/datadog-lib/node_modules/dd-trace/init", + }, + { + Name: "JAVA_TOOL_OPTIONS", + Value: " -javaagent:/datadog-lib/dd-java-agent.jar", + }, + { + Name: "DD_DOTNET_TRACER_HOME", + Value: "/datadog-lib", + }, + { + Name: "CORECLR_ENABLE_PROFILING", + Value: "1", + }, + { + Name: "CORECLR_PROFILER", + Value: "{846F5F1C-F9AE-4B07-969E-05C26BC060D8}", + }, + { + Name: "CORECLR_PROFILER_PATH", + Value: "/datadog-lib/Datadog.Trace.ClrProfiler.Native.so", + }, + { + Name: "DD_TRACE_LOG_DIRECTORY", + Value: "/datadog-lib/logs", + }, + { + Name: "LD_PRELOAD", + Value: "/datadog-lib/continuousprofiler/Datadog.Linux.ApiWrapper.x64.so", + }, + }, + wantErr: false, + }, + { + name: "inject java", + pod: fakePodWithAnnotations(map[string]string{ + "admission.datadoghq.com/java-lib.version": "latest", + "admission.datadoghq.com/java-lib.config.v1": `{"version":1,"tracing_sampling_rate":0.3}`, + }), + expectedEnvs: []corev1.EnvVar{ + { + Name: "DD_TRACE_SAMPLE_RATE", + Value: "0.30", + }, + { + Name: "JAVA_TOOL_OPTIONS", + Value: " -javaagent:/datadog-lib/dd-java-agent.jar", + }, + }, + wantErr: false, + }, + { + name: "inject python", + pod: fakePodWithAnnotations(map[string]string{ + "admission.datadoghq.com/python-lib.version": "latest", + "admission.datadoghq.com/python-lib.config.v1": `{"version":1,"tracing_sampling_rate":0.3}`, + }), + expectedEnvs: []corev1.EnvVar{ + { + Name: "DD_TRACE_SAMPLE_RATE", + Value: "0.30", + }, + { + Name: "PYTHONPATH", + Value: "/datadog-lib/", + }, + }, + wantErr: false, + }, + { + name: "inject node", + pod: fakePodWithAnnotations(map[string]string{ + 
"admission.datadoghq.com/js-lib.version": "latest", + "admission.datadoghq.com/js-lib.config.v1": `{"version":1,"tracing_sampling_rate":0.3}`, + }), + expectedEnvs: []corev1.EnvVar{ + { + Name: "DD_TRACE_SAMPLE_RATE", + Value: "0.30", + }, + { + Name: "NODE_OPTIONS", + Value: " --require=/datadog-lib/node_modules/dd-trace/init", + }, + }, + wantErr: false, + }, + { + name: "inject library and all", + pod: fakePodWithAnnotations(map[string]string{ + "admission.datadoghq.com/all-lib.version": "latest", + "admission.datadoghq.com/all-lib.config.v1": `{"version":1,"runtime_metrics_enabled":true,"tracing_rate_limit":50,"tracing_sampling_rate":0.3}`, + "admission.datadoghq.com/js-lib.version": "v1.10", + "admission.datadoghq.com/js-lib.config.v1": `{"version":1,"tracing_sampling_rate":0.4}`, + }), + expectedEnvs: []corev1.EnvVar{ + { + Name: "DD_RUNTIME_METRICS_ENABLED", + Value: "true", + }, + { + Name: "DD_TRACE_RATE_LIMIT", + Value: "50", + }, + { + Name: "DD_TRACE_SAMPLE_RATE", + Value: "0.40", + }, + { + Name: "NODE_OPTIONS", + Value: " --require=/datadog-lib/node_modules/dd-trace/init", + }, + }, + wantErr: false, + }, + { + name: "inject library and all no library version", + pod: fakePodWithAnnotations(map[string]string{ + "admission.datadoghq.com/all-lib.version": "latest", + "admission.datadoghq.com/all-lib.config.v1": `{"version":1,"runtime_metrics_enabled":true,"tracing_rate_limit":50,"tracing_sampling_rate":0.3}`, + "admission.datadoghq.com/js-lib.config.v1": `{"version":1,"tracing_sampling_rate":0.4}`, + }), + expectedEnvs: []corev1.EnvVar{ + { + Name: "DD_RUNTIME_METRICS_ENABLED", + Value: "true", + }, + { + Name: "DD_TRACE_RATE_LIMIT", + Value: "50", + }, + { + Name: "PYTHONPATH", + Value: "/datadog-lib/", + }, + { + Name: "RUBYOPT", + Value: " -r/datadog-lib/auto_inject", + }, + { + Name: "NODE_OPTIONS", + Value: " --require=/datadog-lib/node_modules/dd-trace/init", + }, + { + Name: "JAVA_TOOL_OPTIONS", + Value: " 
-javaagent:/datadog-lib/dd-java-agent.jar", + }, + { + Name: "DD_DOTNET_TRACER_HOME", + Value: "/datadog-lib", + }, + { + Name: "CORECLR_ENABLE_PROFILING", + Value: "1", + }, + { + Name: "CORECLR_PROFILER", + Value: "{846F5F1C-F9AE-4B07-969E-05C26BC060D8}", + }, + { + Name: "CORECLR_PROFILER_PATH", + Value: "/datadog-lib/Datadog.Trace.ClrProfiler.Native.so", + }, + { + Name: "DD_TRACE_LOG_DIRECTORY", + Value: "/datadog-lib/logs", + }, + { + Name: "LD_PRELOAD", + Value: "/datadog-lib/continuousprofiler/Datadog.Linux.ApiWrapper.x64.so", + }, + { + Name: "DD_TRACE_SAMPLE_RATE", + Value: "0.40", + }, + }, + wantErr: false, + }, + { + name: "inject all error - bad json", + pod: fakePodWithAnnotations(map[string]string{ + // TODO: we might not want to be injecting the libraries if the config is malformed + "admission.datadoghq.com/all-lib.version": "latest", + "admission.datadoghq.com/all-lib.config.v1": `{"version":1,"runtime_metrics_enabled":true,`, + }), + expectedEnvs: []corev1.EnvVar{ + { + Name: "PYTHONPATH", + Value: "/datadog-lib/", + }, + { + Name: "RUBYOPT", + Value: " -r/datadog-lib/auto_inject", + }, + { + Name: "NODE_OPTIONS", + Value: " --require=/datadog-lib/node_modules/dd-trace/init", + }, + { + Name: "JAVA_TOOL_OPTIONS", + Value: " -javaagent:/datadog-lib/dd-java-agent.jar", + }, + { + Name: "DD_DOTNET_TRACER_HOME", + Value: "/datadog-lib", + }, + { + Name: "CORECLR_ENABLE_PROFILING", + Value: "1", + }, + { + Name: "CORECLR_PROFILER", + Value: "{846F5F1C-F9AE-4B07-969E-05C26BC060D8}", + }, + { + Name: "CORECLR_PROFILER_PATH", + Value: "/datadog-lib/Datadog.Trace.ClrProfiler.Native.so", + }, + { + Name: "DD_TRACE_LOG_DIRECTORY", + Value: "/datadog-lib/logs", + }, + { + Name: "LD_PRELOAD", + Value: "/datadog-lib/continuousprofiler/Datadog.Linux.ApiWrapper.x64.so", + }, + }, + wantErr: true, + }, + { + name: "inject java bad json", + pod: fakePodWithAnnotations(map[string]string{ + "admission.datadoghq.com/java-lib.version": "latest", + 
"admission.datadoghq.com/java-lib.config.v1": `{"version":1,"runtime_metrics_enabled":true,`, + }), + expectedEnvs: []corev1.EnvVar{ + { + Name: "JAVA_TOOL_OPTIONS", + Value: " -javaagent:/datadog-lib/dd-java-agent.jar", + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := injectAutoInstrumentation(tt.pod, "", fake.NewSimpleDynamicClient(scheme)) + require.False(t, (err != nil) != tt.wantErr) + + container := tt.pod.Spec.Containers[0] + for _, contEnv := range container.Env { + found := false + for _, expectEnv := range tt.expectedEnvs { + if expectEnv.Name == contEnv.Name { + found = true + break + } + } + if !found { + require.Failf(t, "Unexpected env var injected in container", contEnv.Name) + } + } + for _, expectEnv := range tt.expectedEnvs { + found := false + for _, contEnv := range container.Env { + if expectEnv.Name == contEnv.Name { + found = true + break + } + } + if !found { + require.Failf(t, "Unexpected env var injected in container", expectEnv.Name) + } + } + + envCount := 0 + for _, contEnv := range container.Env { + for _, expectEnv := range tt.expectedEnvs { + if expectEnv.Name == contEnv.Name { + require.Equal(t, expectEnv.Value, contEnv.Value) + envCount++ + break + } + } + } + require.Equal(t, len(tt.expectedEnvs), envCount) + }) + } +} diff --git a/pkg/clusteragent/admission/mutate/test_utils.go b/pkg/clusteragent/admission/mutate/test_utils.go index a8931a7d07c47..52fb9ce067e78 100644 --- a/pkg/clusteragent/admission/mutate/test_utils.go +++ b/pkg/clusteragent/admission/mutate/test_utils.go @@ -96,6 +96,16 @@ func fakePodWithAnnotation(k, v string) *corev1.Pod { return withContainer(pod, "-container") } +func fakePodWithAnnotations(as map[string]string) *corev1.Pod { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Annotations: as, + }, + } + return withContainer(pod, "-container") +} + func fakePodWithEnv(name, env string) *corev1.Pod { return 
fakePodWithContainer(name, corev1.Container{Name: name + "-container", Env: []corev1.EnvVar{fakeEnv(env)}}) } diff --git a/pkg/clusteragent/admission/start.go b/pkg/clusteragent/admission/start.go index 5f887c1e59933..9f8ea8973dfe1 100644 --- a/pkg/clusteragent/admission/start.go +++ b/pkg/clusteragent/admission/start.go @@ -62,7 +62,7 @@ func StartControllers(ctx ControllerContext) error { return err } - v1Enabled, err := useAdmissionV1(ctx) + v1Enabled, err := UseAdmissionV1(ctx.DiscoveryClient) if err != nil { return err } diff --git a/pkg/clusteragent/api/handler_telemetry.go b/pkg/clusteragent/api/handler_telemetry.go index 55901e26c1b00..4f63b809e02d7 100644 --- a/pkg/clusteragent/api/handler_telemetry.go +++ b/pkg/clusteragent/api/handler_telemetry.go @@ -53,6 +53,7 @@ type telemetryWriterWrapper struct { } func (w *telemetryWriterWrapper) WriteHeader(statusCode int) { + w.ResponseWriter.WriteHeader(statusCode) forwarded := w.Header().Get(respForwarded) if forwarded == "" { forwarded = "false" diff --git a/pkg/clusteragent/clusterchecks/dispatcher_main.go b/pkg/clusteragent/clusterchecks/dispatcher_main.go index dbe1a0fa30073..1dd1166414746 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_main.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_main.go @@ -34,6 +34,7 @@ type dispatcher struct { extraTags []string clcRunnersClient clusteragent.CLCRunnerClientInterface advancedDispatching bool + excludedChecks map[string]struct{} } func newDispatcher() *dispatcher { @@ -43,6 +44,15 @@ func newDispatcher() *dispatcher { d.nodeExpirationSeconds = config.Datadog.GetInt64("cluster_checks.node_expiration_timeout") d.extraTags = config.Datadog.GetStringSlice("cluster_checks.extra_tags") + excludedChecks := config.Datadog.GetStringSlice("cluster_checks.exclude_checks") + // This option will almost always be empty + if len(excludedChecks) > 0 { + d.excludedChecks = make(map[string]struct{}, len(excludedChecks)) + for _, checkName := range excludedChecks { + 
d.excludedChecks[checkName] = struct{}{} + } + } + hname, _ := hostname.Get(context.TODO()) clusterTagValue := clustername.GetClusterName(context.TODO(), hname) clusterTagName := config.Datadog.GetString("cluster_checks.cluster_tag_name") @@ -76,6 +86,11 @@ func (d *dispatcher) Stop() { // Schedule implements the scheduler.Scheduler interface func (d *dispatcher) Schedule(configs []integration.Config) { for _, c := range configs { + if _, found := d.excludedChecks[c.Name]; found { + log.Infof("Excluding check due to config: %s", c.Name) + continue + } + if !c.ClusterCheck { continue // Ignore non cluster-check configs } diff --git a/pkg/collector/check/stats.go b/pkg/collector/check/stats.go index c4c0eadedecce..81188b52c14c1 100644 --- a/pkg/collector/check/stats.go +++ b/pkg/collector/check/stats.go @@ -26,6 +26,7 @@ var EventPlatformNameTranslations = map[string]string{ "dbm-samples": "Database Monitoring Query Samples", "dbm-metrics": "Database Monitoring Query Metrics", "dbm-activity": "Database Monitoring Activity Samples", + "dbm-metadata": "Database Monitoring Metadata Samples", "network-devices-metadata": "Network Devices Metadata", "network-devices-netflow": "Network Devices NetFlow", "network-devices-snmp-traps": "SNMP Traps", diff --git a/pkg/collector/corechecks/checkbase.go b/pkg/collector/corechecks/checkbase.go index d8f17bfc299ba..7d6c25857409f 100644 --- a/pkg/collector/corechecks/checkbase.go +++ b/pkg/collector/corechecks/checkbase.go @@ -134,6 +134,15 @@ func (c *CheckBase) CommonConfigure(integrationConfigDigest uint64, initConfig, s.SetCheckService(commonOptions.Service) } + if commonOptions.NoIndex { + s, err := c.GetSender() + if err != nil { + log.Errorf("failed to retrieve a sender for check %s: %s", string(c.ID()), err) + return err + } + s.SetNoIndex(commonOptions.NoIndex) + } + c.source = source return nil } diff --git a/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go b/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go 
index 4a09020b2b6db..aa84ceeb9d333 100644 --- a/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go +++ b/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go @@ -32,6 +32,7 @@ import ( "golang.org/x/exp/maps" "gopkg.in/yaml.v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/discovery" "k8s.io/client-go/tools/cache" "k8s.io/kube-state-metrics/v2/pkg/allowdenylist" @@ -232,8 +233,27 @@ func (k *KSMCheck) Configure(integrationConfigDigest uint64, config, initConfig builder := kubestatemetrics.New() + // Due to how init is done, we cannot use GetAPIClient in `Run()` method + // So we are waiting for a reasonable amount of time here in case. + // We cannot wait forever as there's no way to be notified of shutdown + apiCtx, apiCancel := context.WithTimeout(context.Background(), maximumWaitForAPIServer) + defer apiCancel() + c, err := apiserver.WaitForAPIClient(apiCtx) + if err != nil { + return err + } + + // Discover resources that are currently available + resources, err := discoverResources(c.DiscoveryCl) + if err != nil { + return err + } + // Prepare the collectors for the resources specified in the configuration file. - collectors := k.instance.Collectors + collectors, err := filterUnknownCollectors(k.instance.Collectors, resources) + if err != nil { + return err + } // Enable the KSM default collectors if the config collectors list is empty. if len(collectors) == 0 { @@ -270,16 +290,6 @@ func (k *KSMCheck) Configure(integrationConfigDigest uint64, config, initConfig builder.WithFamilyGeneratorFilter(allowDenyList) - // Due to how init is done, we cannot use GetAPIClient in `Run()` method - // So we are waiting for a reasonable amount of time here in case. 
- // We cannot wait forever as there's no way to be notified of shutdown - apiCtx, apiCancel := context.WithTimeout(context.Background(), maximumWaitForAPIServer) - defer apiCancel() - c, err := apiserver.WaitForAPIClient(apiCtx) - if err != nil { - return err - } - builder.WithKubeClient(c.Cl) builder.WithVPAClient(c.VPAClient) @@ -299,7 +309,7 @@ func (k *KSMCheck) Configure(integrationConfigDigest uint64, config, initConfig // configure custom resources required for extended features and // compatibility across deprecated/removed versions of APIs - cr := k.discoverCustomResources(c, collectors) + cr := k.discoverCustomResources(c, collectors, resources) builder.WithGenerateCustomResourceStoresFunc(builder.GenerateCustomResourceStoresFunc) builder.WithCustomResourceStoreFactories(cr.factories...) builder.WithCustomResourceClients(cr.clients) @@ -326,6 +336,39 @@ func (k *KSMCheck) Configure(integrationConfigDigest uint64, config, initConfig return nil } +func discoverResources(client discovery.DiscoveryInterface) ([]*v1.APIResourceList, error) { + resources, err := client.ServerResources() + if err != nil { + if !discovery.IsGroupDiscoveryFailedError(err) { + return nil, fmt.Errorf("unable to perform resource discovery: %s", err) + } else { + for group, apiGroupErr := range err.(*discovery.ErrGroupDiscoveryFailed).Groups { + log.Warnf("unable to perform resource discovery for group %s: %s", group, apiGroupErr) + } + } + } + return resources, nil +} + +func filterUnknownCollectors(collectors []string, resources []*v1.APIResourceList) ([]string, error) { + resourcesSet := make(map[string]struct{}, len(collectors)) + for _, resourceList := range resources { + for _, resource := range resourceList.APIResources { + resourcesSet[resource.Name] = struct{}{} + } + } + + filteredCollectors := make([]string, 0, len(collectors)) + for i := range collectors { + if _, ok := resourcesSet[collectors[i]]; ok { + filteredCollectors = append(filteredCollectors, collectors[i]) + } 
else { + log.Warnf("resource %v is unknown and will not be collected", collectors[i]) + } + } + return filteredCollectors, nil +} + func (c *KSMConfig) parse(data []byte) error { return yaml.Unmarshal(data, c) } @@ -336,7 +379,7 @@ type customResources struct { clients map[string]interface{} } -func (k *KSMCheck) discoverCustomResources(c *apiserver.APIClient, collectors []string) customResources { +func (k *KSMCheck) discoverCustomResources(c *apiserver.APIClient, collectors []string, resources []*v1.APIResourceList) customResources { // automatically add extended collectors if their standard ones are // enabled for _, c := range collectors { @@ -354,7 +397,7 @@ func (k *KSMCheck) discoverCustomResources(c *apiserver.APIClient, collectors [] customresources.NewExtendedPodFactory(c), } - factories = manageResourcesReplacement(c, factories) + factories = manageResourcesReplacement(c, factories, resources) clients := make(map[string]interface{}, len(factories)) for _, f := range factories { @@ -369,23 +412,12 @@ func (k *KSMCheck) discoverCustomResources(c *apiserver.APIClient, collectors [] } } -func manageResourcesReplacement(c *apiserver.APIClient, factories []customresource.RegistryFactory) []customresource.RegistryFactory { +func manageResourcesReplacement(c *apiserver.APIClient, factories []customresource.RegistryFactory, resources []*v1.APIResourceList) []customresource.RegistryFactory { if c.DiscoveryCl == nil { log.Warn("Kubernetes discovery client has not been properly initialized") return factories } - _, resources, err := c.DiscoveryCl.ServerGroupsAndResources() - if err != nil { - if !discovery.IsGroupDiscoveryFailedError(err) { - log.Warnf("unable to perform resource discovery: %s", err) - } else { - for group, apiGroupErr := range err.(*discovery.ErrGroupDiscoveryFailed).Groups { - log.Warnf("unable to perform resource discovery for group %s: %s", group, apiGroupErr) - } - } - } - // backwards/forwards compatibility resource factories are only // 
registered if they're needed, otherwise they'd overwrite the default // ones that ship with ksm diff --git a/pkg/collector/corechecks/cluster/ksm/kubernetes_state_aggregators.go b/pkg/collector/corechecks/cluster/ksm/kubernetes_state_aggregators.go index bc5d7e2dc53c0..108483936ff91 100644 --- a/pkg/collector/corechecks/cluster/ksm/kubernetes_state_aggregators.go +++ b/pkg/collector/corechecks/cluster/ksm/kubernetes_state_aggregators.go @@ -224,6 +224,16 @@ func defaultMetricAggregators() map[string]metricAggregator { cronJobAggregator := newLastCronJobAggregator() return map[string]metricAggregator{ + "kube_configmap_info": newCountObjectsAggregator( + "configmap.count", + "kube_configmap_info", + []string{"namespace"}, + ), + "kube_secret_info": newCountObjectsAggregator( + "secret.count", + "kube_secret_info", + []string{"namespace"}, + ), "kube_apiservice_labels": newCountObjectsAggregator( "apiservice.count", "kube_apiservice_labels", diff --git a/pkg/collector/corechecks/cluster/orchestrator/collector_bundle.go b/pkg/collector/corechecks/cluster/orchestrator/collector_bundle.go index 277630eda7ac4..1af903445bc52 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collector_bundle.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collector_bundle.go @@ -245,20 +245,12 @@ func (cb *CollectorBundle) prepareExtraSyncTimeout() { // synced. func (cb *CollectorBundle) Initialize() error { informersToSync := make(map[apiserver.InformerName]cache.SharedInformer) - var availableCollectors []collectors.Collector // informerSynced is a helper map which makes sure that we don't initialize the same informer twice. // i.e. the cluster and nodes resources share the same informer and using both can lead to a race condition activating both concurrently. 
informerSynced := map[cache.SharedInformer]struct{}{} for _, collector := range cb.collectors { collector.Init(cb.runCfg) - if !collector.IsAvailable() { - _ = cb.check.Warnf("Collector %q is unavailable, skipping it", collector.Metadata().FullName()) - continue - } - - availableCollectors = append(availableCollectors, collector) - informer := collector.Informer() if _, found := informerSynced[informer]; !found { @@ -275,8 +267,6 @@ func (cb *CollectorBundle) Initialize() error { } } - cb.collectors = availableCollectors - return apiserver.SyncInformers(informersToSync, cb.extraSyncTimeout) } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/collector.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/collector.go index c236656e1de5e..fa31a88a35c82 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/collector.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/collector.go @@ -29,14 +29,6 @@ type Collector interface { // informers and listers. Init(*CollectorRunConfig) - // IsAvailable returns whether a collector is available. - // A typical use-case is checking whether the targeted apiGroup version - // used by the collector is available in the cluster. - // Should be called after Init. - // FIXME: to be removed after collector discovery has been the default for - // some time. - IsAvailable() bool - // Metadata is used to access information describing the collector. 
Metadata() *CollectorMetadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cluster.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cluster.go index fab90bf13b379..9ac0048d2ce69 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cluster.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cluster.go @@ -61,9 +61,6 @@ func (c *ClusterCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *ClusterCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. func (c *ClusterCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrole.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrole.go index 6e3b8a064fbf6..1173e23a06f92 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrole.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrole.go @@ -63,9 +63,6 @@ func (c *ClusterRoleCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *ClusterRoleCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. 
func (c *ClusterRoleCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrolebinding.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrolebinding.go index dee60a13ed033..df854711a1a8e 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrolebinding.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrolebinding.go @@ -63,9 +63,6 @@ func (c *ClusterRoleBindingCollector) Init(rcfg *collectors.CollectorRunConfig) c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *ClusterRoleBindingCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. func (c *ClusterRoleBindingCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cr.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cr.go index e9d90f90c787a..fb55268daedb6 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cr.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cr.go @@ -14,10 +14,10 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" "github.com/DataDog/datadog-agent/pkg/orchestrator" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/informers" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" "k8s.io/client-go/tools/cache" ) @@ -74,9 +74,6 @@ func (c *CRCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. 
-func (c *CRCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. func (c *CRCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/crd.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/crd.go index a698707b6ba32..257d7f8c7ff8c 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/crd.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/crd.go @@ -13,6 +13,7 @@ import ( k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" "github.com/DataDog/datadog-agent/pkg/orchestrator" "github.com/DataDog/datadog-agent/pkg/util/log" + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/informers" @@ -68,9 +69,6 @@ func (c *CRDCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() // return that Lister } -// IsAvailable returns whether the collector is available. -func (c *CRDCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. func (c *CRDCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1.go index 92129aa38d305..e39b4f5b38b65 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1.go @@ -55,9 +55,6 @@ func (c *CronJobV1Collector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. 
-func (c *CronJobV1Collector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. func (c *CronJobV1Collector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1beta1.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1beta1.go index e18be40214d43..f0101e461b10b 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1beta1.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1beta1.go @@ -55,9 +55,6 @@ func (c *CronJobV1Beta1Collector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *CronJobV1Beta1Collector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. func (c *CronJobV1Beta1Collector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/daemonset.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/daemonset.go index 2c82e47fe38ef..65727a9167670 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/daemonset.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/daemonset.go @@ -63,9 +63,6 @@ func (c *DaemonSetCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *DaemonSetCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. 
func (c *DaemonSetCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/deployment.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/deployment.go index 90702230176ad..162c37ae3ed31 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/deployment.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/deployment.go @@ -63,9 +63,6 @@ func (c *DeploymentCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *DeploymentCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. func (c *DeploymentCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/ingress.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/ingress.go index 7ee797d23b508..6f888c1e9f4c8 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/ingress.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/ingress.go @@ -8,18 +8,11 @@ package k8s import ( - "context" - "time" - "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" "github.com/DataDog/datadog-agent/pkg/orchestrator" - "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/retry" - netv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" netv1Informers "k8s.io/client-go/informers/networking/v1" netv1Listers "k8s.io/client-go/listers/networking/v1" @@ -35,11 +28,10 @@ func 
NewIngressCollectorVersions() collectors.CollectorVersions { // IngressCollector is a collector for Kubernetes Ingresss. type IngressCollector struct { - informer netv1Informers.IngressInformer - lister netv1Listers.IngressLister - metadata *collectors.CollectorMetadata - processor *processors.Processor - retryLister func(ctx context.Context, opts metav1.ListOptions) (*netv1.IngressList, error) + informer netv1Informers.IngressInformer + lister netv1Listers.IngressLister + metadata *collectors.CollectorMetadata + processor *processors.Processor } // NewIngressCollector creates a new collector for the Kubernetes Ingress @@ -69,25 +61,6 @@ func (c *IngressCollector) Informer() cache.SharedInformer { func (c *IngressCollector) Init(rcfg *collectors.CollectorRunConfig) { c.informer = rcfg.APIClient.InformerFactory.Networking().V1().Ingresses() c.lister = c.informer.Lister() - c.retryLister = rcfg.APIClient.Cl.NetworkingV1().Ingresses("").List -} - -// IsAvailable returns whether the collector is available. -// Returns false if the networking.k8s.io/v1 API version is not available (kubernetes < 1.19). -func (c *IngressCollector) IsAvailable() bool { - var retrier retry.Retrier - if err := retrier.SetupRetrier(&retry.Config{ - Name: "NetworkV1Discovery", - AttemptMethod: c.list, - Strategy: retry.RetryCount, - RetryCount: 3, // try 3 times - RetryDelay: 1 * time.Second, // with 1 sec interval - }); err != nil { - log.Errorf("Couldn't setup api retrier: %v", err) - return false - } - - return try(&retrier) == nil } // Metadata is used to access information about the collector. 
@@ -118,23 +91,3 @@ func (c *IngressCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors return result, nil } - -func (c *IngressCollector) list() error { - _, err := c.retryLister(context.TODO(), metav1.ListOptions{}) - return err -} - -func try(r *retry.Retrier) error { - for { - _ = r.TriggerRetry() - switch r.RetryStatus() { - case retry.OK: - log.Debug("Queried networking.k8s.io/v1 successfully") - return nil - case retry.PermaFail: - err := r.LastError() - log.Infof("Couldn't query networking.k8s.io/v1 successfully: %s", err.Error()) - return err - } - } -} diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/job.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/job.go index 5b7b8717d61dc..d0e3494588a45 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/job.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/job.go @@ -62,9 +62,6 @@ func (c *JobCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *JobCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. 
func (c *JobCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/namespace.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/namespace.go index 68897fa9e4461..2548baa51e387 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/namespace.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/namespace.go @@ -12,10 +12,10 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" "github.com/DataDog/datadog-agent/pkg/orchestrator" - corev1Informers "k8s.io/client-go/informers/core/v1" - corev1Listers "k8s.io/client-go/listers/core/v1" "k8s.io/apimachinery/pkg/labels" + corev1Informers "k8s.io/client-go/informers/core/v1" + corev1Listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" ) @@ -63,9 +63,6 @@ func (c *NamespaceCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *NamespaceCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. func (c *NamespaceCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/node.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/node.go index 555b7a9c4f688..31c02f86aceae 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/node.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/node.go @@ -62,9 +62,6 @@ func (c *NodeCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. 
-func (c *NodeCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. func (c *NodeCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolume.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolume.go index 21ebcfa4c79c5..4d466ece1b7d2 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolume.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolume.go @@ -63,9 +63,6 @@ func (c *PersistentVolumeCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *PersistentVolumeCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. func (c *PersistentVolumeCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolumeclaim.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolumeclaim.go index f20970ae8a732..f6fb73ebb1193 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolumeclaim.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolumeclaim.go @@ -63,9 +63,6 @@ func (c *PersistentVolumeClaimCollector) Init(rcfg *collectors.CollectorRunConfi c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *PersistentVolumeClaimCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. 
func (c *PersistentVolumeClaimCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/pod_unassigned.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/pod_unassigned.go index 32575b5adfa96..c4e32ba8bd926 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/pod_unassigned.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/pod_unassigned.go @@ -64,9 +64,6 @@ func (c *UnassignedPodCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *UnassignedPodCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. func (c *UnassignedPodCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/replicaset.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/replicaset.go index 92f8f7834a0de..f359c483b81b0 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/replicaset.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/replicaset.go @@ -63,9 +63,6 @@ func (c *ReplicaSetCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *ReplicaSetCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. 
func (c *ReplicaSetCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/role.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/role.go index 9af71d41c65af..663b07b4e9dc4 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/role.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/role.go @@ -62,9 +62,6 @@ func (c *RoleCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *RoleCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. func (c *RoleCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/rolebinding.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/rolebinding.go index f406374ff3056..cff0b0c4ee6d0 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/rolebinding.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/rolebinding.go @@ -63,9 +63,6 @@ func (c *RoleBindingCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *RoleBindingCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. 
func (c *RoleBindingCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/service.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/service.go index 17bb87f89256f..cf8bb5ef06950 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/service.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/service.go @@ -63,9 +63,6 @@ func (c *ServiceCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *ServiceCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. func (c *ServiceCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/serviceaccount.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/serviceaccount.go index bd44551f0bdff..fb7085ba5bd68 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/serviceaccount.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/serviceaccount.go @@ -63,9 +63,6 @@ func (c *ServiceAccountCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *ServiceAccountCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. 
func (c *ServiceAccountCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/statefulset.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/statefulset.go index 4c0447ba8ed65..65ebe046fded6 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/statefulset.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/statefulset.go @@ -63,9 +63,6 @@ func (c *StatefulSetCollector) Init(rcfg *collectors.CollectorRunConfig) { c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *StatefulSetCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. func (c *StatefulSetCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/verticalpodautoscaler.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/verticalpodautoscaler.go index 0c3a941f10d8e..cd205d327f5a6 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/verticalpodautoscaler.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/verticalpodautoscaler.go @@ -41,7 +41,7 @@ func NewVerticalPodAutoscalerCollector() *VerticalPodAutoscalerCollector { return &VerticalPodAutoscalerCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, - IsStable: false, + IsStable: true, IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, @@ -64,9 +64,6 @@ func (c *VerticalPodAutoscalerCollector) Init(rcfg *collectors.CollectorRunConfi c.lister = c.informer.Lister() } -// IsAvailable returns whether the collector is available. -func (c *VerticalPodAutoscalerCollector) IsAvailable() bool { return true } - // Metadata is used to access information about the collector. 
func (c *VerticalPodAutoscalerCollector) Metadata() *collectors.CollectorMetadata { return c.metadata diff --git a/pkg/collector/corechecks/cluster/orchestrator/orchestrator_test.go b/pkg/collector/corechecks/cluster/orchestrator/orchestrator_test.go deleted file mode 100644 index 60252abd3e839..0000000000000 --- a/pkg/collector/corechecks/cluster/orchestrator/orchestrator_test.go +++ /dev/null @@ -1,77 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build kubeapiserver && orchestrator - -package orchestrator - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/tools/cache" - - "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - corev1 "k8s.io/api/core/v1" -) - -// TestOrchestratorCheckSafeReSchedule close simulates the check being unscheduled and rescheduled again -func TestOrchestratorCheckSafeReSchedule(t *testing.T) { - var wg sync.WaitGroup - - client := fake.NewSimpleClientset() - informerFactory := informers.NewSharedInformerFactory(client, 0) - cl := &apiserver.APIClient{Cl: client, InformerFactory: informerFactory, UnassignedPodInformerFactory: informerFactory} - orchCheck := OrchestratorFactory().(*OrchestratorCheck) - orchCheck.apiClient = cl - - bundle := NewCollectorBundle(orchCheck) - err := bundle.Initialize() - assert.NoError(t, err) - - wg.Add(2) - - nodeInformer := informerFactory.Core().V1().Nodes().Informer() - nodeInformer.AddEventHandler(&cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - wg.Done() - }, - }) - - writeNode(t, client, "1") - - // getting rescheduled. 
- orchCheck.Cancel() - // This part is not optimal as the cancel closes a channel which gets propagated everywhere that might take some time. - // If things are too fast the close is not getting propagated fast enough. - // But even if we are too fast and don't catch that part it will not lead to a false positive - time.Sleep(1 * time.Millisecond) - err = bundle.Initialize() - assert.NoError(t, err) - writeNode(t, client, "2") - - wg.Wait() -} - -func writeNode(t *testing.T, client *fake.Clientset, version string) { - kubeN := corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: version, - UID: types.UID("126430c6-5e57-11ea-91d5-42010a8400c6-" + version), - Name: "another-system-" + version, - }, - } - _, err := client.CoreV1().Nodes().Create(context.TODO(), &kubeN, metav1.CreateOptions{}) - assert.NoError(t, err) -} diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/common.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/common.go index 60f3ce5c02456..7fa10c93b02e3 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/common.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/common.go @@ -8,6 +8,9 @@ package k8s import ( + "fmt" + "strings" + model "github.com/DataDog/agent-payload/v5/process" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -69,3 +72,8 @@ func extractLabelSelector(ls *metav1.LabelSelector) []*model.LabelSelectorRequir return labelSelectors } + +// createConditionTag returns tags in a standard format for conditions +func createConditionTag(conditionType string, conditionStatus string) string { + return fmt.Sprintf("kube_condition_%s:%s", strings.ToLower(conditionType), strings.ToLower(conditionStatus)) +} diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset.go index 4a4c976442fb7..b65d9c25ddb98 100644 --- 
a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset.go @@ -8,10 +8,10 @@ package k8s import ( - "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" + model "github.com/DataDog/agent-payload/v5/process" appsv1 "k8s.io/api/apps/v1" - model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" ) // ExtractDaemonSet returns the protobuf model corresponding to a Kubernetes @@ -48,8 +48,41 @@ func ExtractDaemonSet(ds *appsv1.DaemonSet) *model.DaemonSet { daemonSet.Spec.Selectors = extractLabelSelector(ds.Spec.Selector) } + if len(ds.Status.Conditions) > 0 { + dsConditions, conditionTags := extractDaemonSetConditions(ds) + daemonSet.Conditions = dsConditions + daemonSet.Tags = append(daemonSet.Tags, conditionTags...) + } + daemonSet.Spec.ResourceRequirements = ExtractPodTemplateResourceRequirements(ds.Spec.Template) daemonSet.Tags = append(daemonSet.Tags, transformers.RetrieveUnifiedServiceTags(ds.ObjectMeta.Labels)...) 
return &daemonSet } + +// extractDaemonSetConditions iterates over daemonset conditions and returns: +// - the payload representation of those conditions +// - the list of tags that will enable pod filtering by condition +func extractDaemonSetConditions(p *appsv1.DaemonSet) ([]*model.DaemonSetCondition, []string) { + conditions := make([]*model.DaemonSetCondition, 0, len(p.Status.Conditions)) + conditionTags := make([]string, 0, len(p.Status.Conditions)) + + for _, condition := range p.Status.Conditions { + c := &model.DaemonSetCondition{ + Message: condition.Message, + Reason: condition.Reason, + Status: string(condition.Status), + Type: string(condition.Type), + } + if !condition.LastTransitionTime.IsZero() { + c.LastTransitionTime = condition.LastTransitionTime.Unix() + } + + conditions = append(conditions, c) + + conditionTag := createConditionTag(string(condition.Type), string(condition.Status)) + conditionTags = append(conditionTags, conditionTag) + } + + return conditions, conditionTags +} diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset_test.go index 7e4fdfe44f531..8fd49562f0dbf 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset_test.go @@ -8,9 +8,12 @@ package k8s import ( + "time" + model "github.com/DataDog/agent-payload/v5/process" "github.com/stretchr/testify/assert" v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -19,6 +22,7 @@ import ( func TestExtractDaemonset(t *testing.T) { testIntOrStr := intstr.FromString("1%") + timestamp := metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)) // 1389744000 tests := map[string]struct { input v1.DaemonSet @@ -51,6 +55,15 @@ func TestExtractDaemonset(t *testing.T) { 
}, }, Status: v1.DaemonSetStatus{ + Conditions: []v1.DaemonSetCondition{ + { + Type: "Test", + Status: corev1.ConditionFalse, + LastTransitionTime: timestamp, + Reason: "test reason", + Message: "test message", + }, + }, CurrentNumberScheduled: 1, NumberReady: 1, }, @@ -59,6 +72,16 @@ func TestExtractDaemonset(t *testing.T) { Name: "daemonset", Namespace: "namespace", }, + Conditions: []*model.DaemonSetCondition{ + { + Type: "Test", + Status: string(corev1.ConditionFalse), + LastTransitionTime: timestamp.Unix(), + Reason: "test reason", + Message: "test message", + }, + }, + Tags: []string{"kube_condition_test:false"}, Spec: &model.DaemonSetSpec{ DeploymentStrategy: "RollingUpdate", MaxUnavailable: "1%", diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment.go index a433e040b34eb..6b9d4f8dfd413 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment.go @@ -9,6 +9,7 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" appsv1 "k8s.io/api/apps/v1" @@ -48,6 +49,12 @@ func ExtractDeployment(d *appsv1.Deployment) *model.Deployment { deploy.UnavailableReplicas = d.Status.UnavailableReplicas deploy.ConditionMessage = extractDeploymentConditionMessage(d.Status.Conditions) + if len(d.Status.Conditions) > 0 { + deployConditions, conditionTags := extractDeploymentConditions(d) + deploy.Conditions = deployConditions + deploy.Tags = append(deploy.Tags, conditionTags...) + } + deploy.ResourceRequirements = ExtractPodTemplateResourceRequirements(d.Spec.Template) deploy.Tags = append(deploy.Tags, transformers.RetrieveUnifiedServiceTags(d.ObjectMeta.Labels)...) 
@@ -80,3 +87,34 @@ func extractDeploymentConditionMessage(conditions []appsv1.DeploymentCondition) } return "" } + +// extractDeploymentConditions iterates over deployment conditions and returns: +// - the payload representation of those conditions +// - the list of tags that will enable pod filtering by condition +func extractDeploymentConditions(p *appsv1.Deployment) ([]*model.DeploymentCondition, []string) { + conditions := make([]*model.DeploymentCondition, 0, len(p.Status.Conditions)) + conditionTags := make([]string, 0, len(p.Status.Conditions)) + + for _, condition := range p.Status.Conditions { + c := &model.DeploymentCondition{ + Message: condition.Message, + Reason: condition.Reason, + Status: string(condition.Status), + Type: string(condition.Type), + } + if !condition.LastTransitionTime.IsZero() { + c.LastTransitionTime = condition.LastTransitionTime.Unix() + } + + if !condition.LastUpdateTime.IsZero() { + c.LastUpdateTime = condition.LastUpdateTime.Unix() + } + + conditions = append(conditions, c) + + conditionTag := createConditionTag(string(condition.Type), string(condition.Status)) + conditionTags = append(conditionTags, conditionTag) + } + + return conditions, conditionTags +} diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment_test.go index ff0cc5fe44fc0..ebe9258c77a78 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment_test.go @@ -13,6 +13,7 @@ import ( "time" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/stretchr/testify/assert" @@ -113,6 +114,24 @@ func TestExtractDeployment(t *testing.T) { AvailableReplicas: 2, UnavailableReplicas: 0, ConditionMessage: `ReplicaSet "orchestrator-intake-6d65b45d4d" has timed out progressing.`, + 
Conditions: []*model.DeploymentCondition{ + { + Type: string(appsv1.DeploymentAvailable), + Status: string(corev1.ConditionFalse), + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + }, + { + Type: string(appsv1.DeploymentProgressing), + Status: string(corev1.ConditionFalse), + Reason: "NewReplicaSetAvailable", + Message: `ReplicaSet "orchestrator-intake-6d65b45d4d" has timed out progressing.`, + }, + }, + Tags: []string{ + "kube_condition_available:false", + "kube_condition_progressing:false", + }, }, }, "empty deploy": {input: appsv1.Deployment{}, expected: model.Deployment{Metadata: &model.Metadata{}, ReplicasDesired: 1}}, diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job.go index ced39d2fe4251..3227b52a5c839 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job.go @@ -9,9 +9,9 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" - "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" - batchv1 "k8s.io/api/batch/v1" + + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" ) // ExtractJob returns the protobuf model corresponding to a Kubernetes Job @@ -54,6 +54,12 @@ func ExtractJob(j *batchv1.Job) *model.Job { job.Status.CompletionTime = j.Status.CompletionTime.Unix() } + if len(j.Status.Conditions) > 0 { + jConditions, conditionTags := extractJobConditions(j) + job.Conditions = jConditions + job.Tags = append(job.Tags, conditionTags...) + } + job.Spec.ResourceRequirements = ExtractPodTemplateResourceRequirements(j.Spec.Template) job.Tags = append(job.Tags, transformers.RetrieveUnifiedServiceTags(j.ObjectMeta.Labels)...) 
@@ -68,3 +74,34 @@ func extractJobConditionMessage(conditions []batchv1.JobCondition) string { } return "" } + +// extractJobConditions iterates over job conditions and returns: +// - the payload representation of those conditions +// - the list of tags that will enable pod filtering by condition +func extractJobConditions(p *batchv1.Job) ([]*model.JobCondition, []string) { + conditions := make([]*model.JobCondition, 0, len(p.Status.Conditions)) + conditionTags := make([]string, 0, len(p.Status.Conditions)) + + for _, condition := range p.Status.Conditions { + c := &model.JobCondition{ + Message: condition.Message, + Reason: condition.Reason, + Status: string(condition.Status), + Type: string(condition.Type), + } + if !condition.LastTransitionTime.IsZero() { + c.LastTransitionTime = condition.LastTransitionTime.Unix() + } + + if !condition.LastProbeTime.IsZero() { + c.LastProbeTime = condition.LastProbeTime.Unix() + } + + conditions = append(conditions, c) + + conditionTag := createConditionTag(string(condition.Type), string(condition.Status)) + conditionTags = append(conditionTags, conditionTag) + } + + return conditions, conditionTags +} diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job_test.go index dbf4eb226086b..57d7777bc23ef 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job_test.go @@ -12,6 +12,7 @@ import ( "time" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/stretchr/testify/assert" @@ -183,6 +184,15 @@ func TestExtractJob(t *testing.T) { Succeeded: 1, StartTime: startTime.Unix(), }, + Conditions: []*model.JobCondition{ + { + LastProbeTime: lastTransitionTime.Unix(), + LastTransitionTime: lastTransitionTime.Unix(), + Status: string(corev1.ConditionTrue), + Type: 
string(batchv1.JobComplete), + }, + }, + Tags: []string{"kube_condition_complete:true"}, }, }, "job started by cronjob (failed)": { @@ -268,6 +278,17 @@ func TestExtractJob(t *testing.T) { Failed: 1, StartTime: startTime.Unix(), }, + Conditions: []*model.JobCondition{ + { + LastProbeTime: lastTransitionTime.Unix(), + LastTransitionTime: lastTransitionTime.Unix(), + Message: "Job has reached the specified backoff limit", + Reason: "BackoffLimitExceeded", + Status: string(corev1.ConditionTrue), + Type: string(batchv1.JobFailed), + }, + }, + Tags: []string{"kube_condition_failed:true"}, }, }, "job with resources": { diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace.go index efd82d99155e5..44e5dc4eef397 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace.go @@ -9,8 +9,9 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" - "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" corev1 "k8s.io/api/core/v1" + + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" ) // ExtractNamespace returns the protobuf model corresponding to a Kubernetes Namespace resource. @@ -21,6 +22,11 @@ func ExtractNamespace(ns *corev1.Namespace) *model.Namespace { Status: string(ns.Status.Phase), ConditionMessage: getNamespaceConditionMessage(ns), } + if len(ns.Status.Conditions) > 0 { + namespaceConditions, conditionTags := extractNamespaceConditions(ns) + n.Conditions = namespaceConditions + n.Tags = append(n.Tags, conditionTags...) + } n.Tags = append(n.Tags, transformers.RetrieveUnifiedServiceTags(ns.ObjectMeta.Labels)...) 
@@ -58,3 +64,30 @@ func getNamespaceConditionMessage(n *corev1.Namespace) string { } return "" } + +// extractNamespaceConditions iterates over namespace conditions and returns: +// - the payload representation of those conditions +// - the list of tags that will enable pod filtering by condition +func extractNamespaceConditions(n *corev1.Namespace) ([]*model.NamespaceCondition, []string) { + conditions := make([]*model.NamespaceCondition, 0, len(n.Status.Conditions)) + conditionTags := make([]string, 0, len(n.Status.Conditions)) + + for _, condition := range n.Status.Conditions { + c := &model.NamespaceCondition{ + Message: condition.Message, + Reason: condition.Reason, + Status: string(condition.Status), + Type: string(condition.Type), + } + if !condition.LastTransitionTime.IsZero() { + c.LastTransitionTime = condition.LastTransitionTime.Unix() + } + + conditions = append(conditions, c) + + conditionTag := createConditionTag(string(condition.Type), string(condition.Status)) + conditionTags = append(conditionTags, conditionTag) + } + + return conditions, conditionTags +} diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace_test.go index b92f3d8981605..0b024acc57c8f 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace_test.go @@ -77,6 +77,28 @@ func TestExtractNamespace(t *testing.T) { }, Status: "a-phase", ConditionMessage: "right msg", + Conditions: []*model.NamespaceCondition{ + { + Type: "NamespaceFinalizersRemaining", + Status: "False", + Message: "wrong msg", + }, + { + Type: "NamespaceDeletionContentFailure", + Status: "True", + Message: "also the wrong msg", + }, + { + Type: "NamespaceDeletionDiscoveryFailure", + Status: "True", + Message: "right msg", + }, + }, + Tags: []string{ + 
"kube_condition_namespacefinalizersremaining:false", + "kube_condition_namespacedeletioncontentfailure:true", + "kube_condition_namespacedeletiondiscoveryfailure:true", + }, }, }, "nil-safety": { diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume.go index fabe7209603aa..fea7eecdfd1a0 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume.go @@ -73,7 +73,7 @@ func ExtractPersistentVolume(pv *corev1.PersistentVolume) *model.PersistentVolum message.Spec.NodeAffinity = selectorTerms } - message.Spec.PersistentVolumeType = extractVolumeSource(pv.Spec.PersistentVolumeSource) + addVolumeSource(message, pv.Spec.PersistentVolumeSource) st := pv.Spec.Capacity.Storage() if !st.IsZero() { @@ -111,38 +111,133 @@ func extractPVSelector(ls []corev1.NodeSelectorRequirement) []*model.LabelSelect return labelSelectors } -func extractVolumeSource(volume corev1.PersistentVolumeSource) string { +func addVolumeSource(pvModel *model.PersistentVolume, volume corev1.PersistentVolumeSource) { switch { case volume.HostPath != nil: - return "HostPath" + pvModel.Spec.PersistentVolumeType = "HostPath" case volume.GCEPersistentDisk != nil: - return "GCEPersistentDisk" + pvModel.Spec.PersistentVolumeType = "GCEPersistentDisk" + pvModel.Spec.PersistentVolumeSource = &model.PersistentVolumeSource{ + GcePersistentDisk: extractGCEPersistentDiskVolumeSource(volume), + } case volume.AWSElasticBlockStore != nil: - return "AWSElasticBlockStore" + pvModel.Spec.PersistentVolumeType = "AWSElasticBlockStore" + pvModel.Spec.PersistentVolumeSource = &model.PersistentVolumeSource{ + AwsElasticBlockStore: extractAWSElasticBlockStoreVolumeSource(volume), + } case volume.Quobyte != nil: - return "Quobyte" + pvModel.Spec.PersistentVolumeType = "Quobyte" case 
volume.Cinder != nil: - return "Cinder" + pvModel.Spec.PersistentVolumeType = "Cinder" case volume.PhotonPersistentDisk != nil: - return "PhotonPersistentDisk" + pvModel.Spec.PersistentVolumeType = "PhotonPersistentDisk" case volume.PortworxVolume != nil: - return "PortworxVolume" + pvModel.Spec.PersistentVolumeType = "PortworxVolume" case volume.ScaleIO != nil: - return "ScaleIO" + pvModel.Spec.PersistentVolumeType = "ScaleIO" case volume.CephFS != nil: - return "CephFS" + pvModel.Spec.PersistentVolumeType = "CephFS" case volume.StorageOS != nil: - return "StorageOS" + pvModel.Spec.PersistentVolumeType = "StorageOS" case volume.FC != nil: - return "FC" + pvModel.Spec.PersistentVolumeType = "FC" case volume.AzureFile != nil: - return "AzureFile" + pvModel.Spec.PersistentVolumeType = "AzureFile" + pvModel.Spec.PersistentVolumeSource = &model.PersistentVolumeSource{ + AzureFile: extractAzureFilePersistentVolumeSource(volume), + } + case volume.AzureDisk != nil: + pvModel.Spec.PersistentVolumeType = "AzureDisk" + pvModel.Spec.PersistentVolumeSource = &model.PersistentVolumeSource{ + AzureDisk: extractAzureDiskVolumeSource(volume), + } case volume.FlexVolume != nil: - return "FlexVolume" + pvModel.Spec.PersistentVolumeType = "FlexVolume" case volume.Flocker != nil: - return "Flocker" + pvModel.Spec.PersistentVolumeType = "Flocker" case volume.CSI != nil: - return "CSI" + pvModel.Spec.PersistentVolumeType = "CSI" + pvModel.Spec.PersistentVolumeSource = &model.PersistentVolumeSource{ + Csi: extractCSIVolumeSource(volume), + } + default: + pvModel.Spec.PersistentVolumeType = "" + } +} + +func extractGCEPersistentDiskVolumeSource(volume corev1.PersistentVolumeSource) *model.GCEPersistentDiskVolumeSource { + return &model.GCEPersistentDiskVolumeSource{ + PdName: volume.GCEPersistentDisk.PDName, + FsType: volume.GCEPersistentDisk.FSType, + Partition: volume.GCEPersistentDisk.Partition, + ReadOnly: volume.GCEPersistentDisk.ReadOnly, + } +} + +func 
extractAWSElasticBlockStoreVolumeSource(volume corev1.PersistentVolumeSource) *model.AWSElasticBlockStoreVolumeSource { + return &model.AWSElasticBlockStoreVolumeSource{ + VolumeID: volume.AWSElasticBlockStore.VolumeID, + FsType: volume.AWSElasticBlockStore.FSType, + Partition: volume.AWSElasticBlockStore.Partition, + ReadOnly: volume.AWSElasticBlockStore.ReadOnly, + } +} + +func extractAzureFilePersistentVolumeSource(volume corev1.PersistentVolumeSource) *model.AzureFilePersistentVolumeSource { + m := &model.AzureFilePersistentVolumeSource{ + SecretName: volume.AzureFile.SecretName, + ShareName: volume.AzureFile.ShareName, + ReadOnly: volume.AzureFile.ReadOnly, + } + if volume.AzureFile.SecretNamespace != nil { + m.SecretNamespace = *volume.AzureFile.SecretNamespace + } + return m +} + +func extractAzureDiskVolumeSource(volume corev1.PersistentVolumeSource) *model.AzureDiskVolumeSource { + m := &model.AzureDiskVolumeSource{ + DiskName: volume.AzureDisk.DiskName, + DiskURI: volume.AzureDisk.DataDiskURI, + } + if volume.AzureDisk.CachingMode != nil { + m.CachingMode = string(*volume.AzureDisk.CachingMode) + } + if volume.AzureDisk.FSType != nil { + m.FsType = *volume.AzureDisk.FSType + } + if volume.AzureDisk.ReadOnly != nil { + m.ReadOnly = *volume.AzureDisk.ReadOnly + } + if volume.AzureDisk.Kind != nil { + m.Kind = string(*volume.AzureDisk.Kind) + } + + return m +} + +func extractCSIVolumeSource(volume corev1.PersistentVolumeSource) *model.CSIVolumeSource { + m := &model.CSIVolumeSource{ + Driver: volume.CSI.Driver, + VolumeHandle: volume.CSI.VolumeHandle, + ReadOnly: volume.CSI.ReadOnly, + FsType: volume.CSI.FSType, + VolumeAttributes: volume.CSI.VolumeAttributes, + } + + m.ControllerPublishSecretRef = extractSecretReference(volume.CSI.ControllerPublishSecretRef) + m.NodeStageSecretRef = extractSecretReference(volume.CSI.NodeStageSecretRef) + m.NodePublishSecretRef = extractSecretReference(volume.CSI.NodePublishSecretRef) + m.ControllerExpandSecretRef = 
extractSecretReference(volume.CSI.ControllerExpandSecretRef) + return m +} + +func extractSecretReference(ref *corev1.SecretReference) *model.SecretReference { + if ref == nil { + return nil + } + return &model.SecretReference{ + Name: ref.Name, + Namespace: ref.Namespace, } - return "" } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume_test.go index 14c66e516766f..07cf4591941bf 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume_test.go @@ -8,6 +8,7 @@ package k8s import ( + "strings" "testing" "time" @@ -20,150 +21,296 @@ import ( "k8s.io/apimachinery/pkg/types" ) +var ( + creationTime = metav1.NewTime(time.Date(2021, time.April, 16, 14, 30, 0, 0, time.UTC)) + filesystem = corev1.PersistentVolumeFilesystem + parsedResource = resource.MustParse("2Gi") +) + func TestExtractPersistentVolume(t *testing.T) { - creationTime := metav1.NewTime(time.Date(2021, time.April, 16, 14, 30, 0, 0, time.UTC)) - filesystem := corev1.PersistentVolumeFilesystem - parsedResource := resource.MustParse("2Gi") + basicInputPV := newInputPV() + basicExpectedPV := newExpectedPV() tests := map[string]struct { - input corev1.PersistentVolume - expected model.PersistentVolume + basicInputPV corev1.PersistentVolume + inputSource corev1.PersistentVolumeSource + basicExpectedPV model.PersistentVolume + expectedSource *model.PersistentVolumeSource + expectedType string }{ "full pv": { - input: corev1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "annotation": "my-annotation", + basicInputPV: basicInputPV, + basicExpectedPV: basicExpectedPV, + expectedType: "", + }, + "gce": { + basicInputPV: basicInputPV, + inputSource: corev1.PersistentVolumeSource{ + GCEPersistentDisk: 
&corev1.GCEPersistentDiskVolumeSource{ + PDName: "GCE", + FSType: "GCE", + Partition: 10, + ReadOnly: false, + }, + }, + basicExpectedPV: basicExpectedPV, + expectedSource: &model.PersistentVolumeSource{ + GcePersistentDisk: &model.GCEPersistentDiskVolumeSource{ + PdName: "GCE", + FsType: "GCE", + Partition: 10, + ReadOnly: false, + }, + }, + expectedType: "GCEPersistentDisk", + }, + "aws": { + basicInputPV: basicInputPV, + inputSource: corev1.PersistentVolumeSource{ + AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "abc", + FSType: "aws", + Partition: 10, + ReadOnly: false, + }, + }, + basicExpectedPV: basicExpectedPV, + expectedSource: &model.PersistentVolumeSource{ + AwsElasticBlockStore: &model.AWSElasticBlockStoreVolumeSource{ + VolumeID: "abc", + FsType: "aws", + Partition: 10, + ReadOnly: false, + }, + }, + expectedType: "AWSElasticBlockStore", + }, + "azure file": { + basicInputPV: basicInputPV, + inputSource: corev1.PersistentVolumeSource{ + AzureFile: &corev1.AzureFilePersistentVolumeSource{ + SecretName: "secret", + ShareName: "share", + ReadOnly: false, + SecretNamespace: toPt("default"), + }, + }, + basicExpectedPV: basicExpectedPV, + expectedSource: &model.PersistentVolumeSource{ + AzureFile: &model.AzureFilePersistentVolumeSource{ + SecretName: "secret", + ShareName: "share", + ReadOnly: false, + SecretNamespace: "default", + }, + }, + expectedType: "AzureFile", + }, + "azure disk": { + basicInputPV: basicInputPV, + inputSource: corev1.PersistentVolumeSource{ + AzureDisk: &corev1.AzureDiskVolumeSource{ + DiskName: "disk", + DataDiskURI: "/home", + CachingMode: toPt(corev1.AzureDataDiskCachingMode("default")), + FSType: toPt("az"), + }, + }, + basicExpectedPV: basicExpectedPV, + expectedSource: &model.PersistentVolumeSource{ + AzureDisk: &model.AzureDiskVolumeSource{ + DiskName: "disk", + DiskURI: "/home", + CachingMode: "default", + FsType: "az", + }, + }, + expectedType: "AzureDisk", + }, + "csi": { + basicInputPV: 
basicInputPV, + inputSource: corev1.PersistentVolumeSource{ + CSI: &corev1.CSIPersistentVolumeSource{ + Driver: "csi", + ReadOnly: false, + FSType: "csi", + VolumeHandle: "handle", + VolumeAttributes: map[string]string{ + "csi": "test", + "namespace": "default", }, - CreationTimestamp: creationTime, - Labels: map[string]string{ - "app": "my-app", + NodeStageSecretRef: &corev1.SecretReference{ + Namespace: "default", + Name: "node_stage", }, - Finalizers: []string{"foo.com/x", metav1.FinalizerOrphanDependents, "bar.com/y"}, - Name: "pv", - Namespace: "project", - ResourceVersion: "220593670", - UID: types.UID("0ff96226-578d-4679-b3c8-72e8a485c0ef"), }, - Spec: corev1.PersistentVolumeSpec{ - MountOptions: []string{"ro", "soft"}, - Capacity: corev1.ResourceList{corev1.ResourceStorage: parsedResource}, - PersistentVolumeSource: corev1.PersistentVolumeSource{ - GCEPersistentDisk: &corev1.GCEPersistentDiskVolumeSource{ - PDName: "GCE", - FSType: "GCE", - Partition: 10, - ReadOnly: false, - }, - }, - AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany, corev1.ReadWriteOnce}, - ClaimRef: &corev1.ObjectReference{ - Namespace: "test", - Name: "test-pv", + }, + basicExpectedPV: basicExpectedPV, + expectedSource: &model.PersistentVolumeSource{ + Csi: &model.CSIVolumeSource{ + Driver: "csi", + ReadOnly: false, + FsType: "csi", + VolumeHandle: "handle", + VolumeAttributes: map[string]string{ + "csi": "test", + "namespace": "default", }, - PersistentVolumeReclaimPolicy: corev1.PersistentVolumeReclaimRetain, - StorageClassName: "gold", - VolumeMode: &filesystem, - NodeAffinity: &corev1.VolumeNodeAffinity{ - Required: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{ - { - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - Key: "test-key3", - Operator: corev1.NodeSelectorOpIn, - Values: []string{"test-value1", "test-value3"}, - }, - }, - MatchFields: []corev1.NodeSelectorRequirement{ - { - Key: "test-key2", - Operator: 
corev1.NodeSelectorOpIn, - Values: []string{"test-value0", "test-value2"}, - }, - }, - }, - { - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - Key: "test-key3", - Operator: corev1.NodeSelectorOpIn, - Values: []string{"test-value1", "test-value3"}, - }, - }}, - }, - }, + NodeStageSecretRef: &model.SecretReference{ + Namespace: "default", + Name: "node_stage", }, }, - Status: corev1.PersistentVolumeStatus{ - Phase: corev1.VolumePending, - Message: "test", - Reason: "test", - }, }, - expected: model.PersistentVolume{ - Metadata: &model.Metadata{ - Annotations: []string{"annotation:my-annotation"}, - CreationTimestamp: creationTime.Unix(), - Labels: []string{"app:my-app"}, - Finalizers: []string{"foo.com/x", metav1.FinalizerOrphanDependents, "bar.com/y"}, - Name: "pv", - Namespace: "project", - ResourceVersion: "220593670", - Uid: "0ff96226-578d-4679-b3c8-72e8a485c0ef", + expectedType: "CSI", + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + tc.basicInputPV.Spec.PersistentVolumeSource = tc.inputSource + tc.basicExpectedPV.Spec.PersistentVolumeType = tc.expectedType + tc.basicExpectedPV.Spec.PersistentVolumeSource = tc.expectedSource + tc.basicExpectedPV.Tags = append(tc.basicExpectedPV.Tags, "pv_type:"+strings.ToLower(tc.expectedType)) + assert.Equal(t, &tc.basicExpectedPV, ExtractPersistentVolume(&tc.basicInputPV)) + }) + } +} + +func newInputPV() corev1.PersistentVolume { + return corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "annotation": "my-annotation", + }, + CreationTimestamp: creationTime, + Labels: map[string]string{ + "app": "my-app", + }, + Finalizers: []string{"foo.com/x", metav1.FinalizerOrphanDependents, "bar.com/y"}, + Name: "pv", + Namespace: "project", + ResourceVersion: "220593670", + UID: types.UID("0ff96226-578d-4679-b3c8-72e8a485c0ef"), + }, + Spec: corev1.PersistentVolumeSpec{ + MountOptions: []string{"ro", "soft"}, + Capacity: 
corev1.ResourceList{corev1.ResourceStorage: parsedResource}, + PersistentVolumeSource: corev1.PersistentVolumeSource{ + GCEPersistentDisk: &corev1.GCEPersistentDiskVolumeSource{ + PDName: "GCE", + FSType: "GCE", + Partition: 10, + ReadOnly: false, }, - Spec: &model.PersistentVolumeSpec{ - Capacity: map[string]int64{string(corev1.ResourceStorage): parsedResource.Value()}, - PersistentVolumeType: "GCEPersistentDisk", - AccessModes: []string{string(corev1.ReadWriteMany), string(corev1.ReadWriteOnce)}, - ClaimRef: &model.ObjectReference{ - Namespace: "test", - Name: "test-pv", - }, - PersistentVolumeReclaimPolicy: string(corev1.PersistentVolumeReclaimRetain), - StorageClassName: "gold", - MountOptions: []string{"ro", "soft"}, - VolumeMode: string(filesystem), - NodeAffinity: []*model.NodeSelectorTerm{ + }, + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany, corev1.ReadWriteOnce}, + ClaimRef: &corev1.ObjectReference{ + Namespace: "test", + Name: "test-pv", + }, + PersistentVolumeReclaimPolicy: corev1.PersistentVolumeReclaimRetain, + StorageClassName: "gold", + VolumeMode: &filesystem, + NodeAffinity: &corev1.VolumeNodeAffinity{ + Required: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ { - MatchExpressions: []*model.LabelSelectorRequirement{ + MatchExpressions: []corev1.NodeSelectorRequirement{ { Key: "test-key3", - Operator: string(corev1.NodeSelectorOpIn), + Operator: corev1.NodeSelectorOpIn, Values: []string{"test-value1", "test-value3"}, }, }, - MatchFields: []*model.LabelSelectorRequirement{ + MatchFields: []corev1.NodeSelectorRequirement{ { Key: "test-key2", - Operator: string(corev1.NodeSelectorOpIn), + Operator: corev1.NodeSelectorOpIn, Values: []string{"test-value0", "test-value2"}, }, }, }, { - MatchExpressions: []*model.LabelSelectorRequirement{ + MatchExpressions: []corev1.NodeSelectorRequirement{ { Key: "test-key3", - Operator: string(corev1.NodeSelectorOpIn), + Operator: corev1.NodeSelectorOpIn, Values: 
[]string{"test-value1", "test-value3"}, }, - }, + }}, + }, + }, + }, + }, + Status: corev1.PersistentVolumeStatus{ + Phase: corev1.VolumePending, + Message: "test", + Reason: "test", + }, + } +} + +func newExpectedPV() model.PersistentVolume { + return model.PersistentVolume{ + Metadata: &model.Metadata{ + Annotations: []string{"annotation:my-annotation"}, + CreationTimestamp: creationTime.Unix(), + Labels: []string{"app:my-app"}, + Finalizers: []string{"foo.com/x", metav1.FinalizerOrphanDependents, "bar.com/y"}, + Name: "pv", + Namespace: "project", + ResourceVersion: "220593670", + Uid: "0ff96226-578d-4679-b3c8-72e8a485c0ef", + }, + Spec: &model.PersistentVolumeSpec{ + Capacity: map[string]int64{string(corev1.ResourceStorage): parsedResource.Value()}, + PersistentVolumeType: "", + AccessModes: []string{string(corev1.ReadWriteMany), string(corev1.ReadWriteOnce)}, + ClaimRef: &model.ObjectReference{ + Namespace: "test", + Name: "test-pv", + }, + PersistentVolumeReclaimPolicy: string(corev1.PersistentVolumeReclaimRetain), + StorageClassName: "gold", + MountOptions: []string{"ro", "soft"}, + VolumeMode: string(filesystem), + NodeAffinity: []*model.NodeSelectorTerm{ + { + MatchExpressions: []*model.LabelSelectorRequirement{ + { + Key: "test-key3", + Operator: string(corev1.NodeSelectorOpIn), + Values: []string{"test-value1", "test-value3"}, + }, + }, + MatchFields: []*model.LabelSelectorRequirement{ + { + Key: "test-key2", + Operator: string(corev1.NodeSelectorOpIn), + Values: []string{"test-value0", "test-value2"}, }, }, }, - Status: &model.PersistentVolumeStatus{ - Phase: string(corev1.VolumePending), - Message: "test", - Reason: "test", + { + MatchExpressions: []*model.LabelSelectorRequirement{ + { + Key: "test-key3", + Operator: string(corev1.NodeSelectorOpIn), + Values: []string{"test-value1", "test-value3"}, + }, + }, }, - Tags: []string{"pv_phase:pending", "pv_type:gcepersistentdisk"}, }, }, + Status: &model.PersistentVolumeStatus{ + Phase: 
string(corev1.VolumePending), + Message: "test", + Reason: "test", + }, + Tags: []string{"pv_phase:pending"}, } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractPersistentVolume(&tc.input)) - }) - } +} + +func toPt[T any](s T) *T { + return &s } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go index 0dde68db30dda..71becf7062efa 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go @@ -12,7 +12,6 @@ import ( "hash/fnv" "sort" "strconv" - "strings" model "github.com/DataDog/agent-payload/v5/process" @@ -274,9 +273,9 @@ func convertResourceRequirements(rq corev1.ResourceRequirements, containerName s // extractPodConditions iterates over pod conditions and returns: // - the payload representation of those conditions // - the list of tags that will enable pod filtering by condition -func extractPodConditions(p *corev1.Pod) (conditions []*model.PodCondition, conditionTags []string) { - conditions = make([]*model.PodCondition, 0, len(p.Status.Conditions)) - conditionTags = make([]string, 0, len(p.Status.Conditions)) +func extractPodConditions(p *corev1.Pod) ([]*model.PodCondition, []string) { + conditions := make([]*model.PodCondition, 0, len(p.Status.Conditions)) + conditionTags := make([]string, 0, len(p.Status.Conditions)) for _, condition := range p.Status.Conditions { c := &model.PodCondition{ @@ -294,11 +293,11 @@ func extractPodConditions(p *corev1.Pod) (conditions []*model.PodCondition, cond conditions = append(conditions, c) - conditionTag := fmt.Sprintf("kube_condition_%s:%s", strings.ToLower(string(condition.Type)), strings.ToLower(string(condition.Status))) + conditionTag := createConditionTag(string(condition.Type), string(condition.Status)) conditionTags = append(conditionTags, conditionTag) } - 
return + return conditions, conditionTags } // getConditionMessage loops through the pod conditions, and reports the message of the first one diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset.go index bf6c72cd9ada5..66219033bcbc1 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset.go @@ -9,8 +9,9 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" - "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" appsv1 "k8s.io/api/apps/v1" + + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" ) // ExtractReplicaSet returns the protobuf model corresponding to a Kubernetes @@ -34,8 +35,41 @@ func ExtractReplicaSet(rs *appsv1.ReplicaSet) *model.ReplicaSet { replicaSet.ReadyReplicas = rs.Status.ReadyReplicas replicaSet.AvailableReplicas = rs.Status.AvailableReplicas + if len(rs.Status.Conditions) > 0 { + rsConditions, conditionTags := extractReplicaSetConditions(rs) + replicaSet.Conditions = rsConditions + replicaSet.Tags = append(replicaSet.Tags, conditionTags...) + } + replicaSet.ResourceRequirements = ExtractPodTemplateResourceRequirements(rs.Spec.Template) replicaSet.Tags = append(replicaSet.Tags, transformers.RetrieveUnifiedServiceTags(rs.ObjectMeta.Labels)...) 
return &replicaSet } + +// extractReplicaSetConditions iterates over replicaset conditions and returns: +// - the payload representation of those conditions +// - the list of tags that will enable pod filtering by condition +func extractReplicaSetConditions(p *appsv1.ReplicaSet) ([]*model.ReplicaSetCondition, []string) { + conditions := make([]*model.ReplicaSetCondition, 0, len(p.Status.Conditions)) + conditionTags := make([]string, 0, len(p.Status.Conditions)) + + for _, condition := range p.Status.Conditions { + c := &model.ReplicaSetCondition{ + Message: condition.Message, + Reason: condition.Reason, + Status: string(condition.Status), + Type: string(condition.Type), + } + if !condition.LastTransitionTime.IsZero() { + c.LastTransitionTime = condition.LastTransitionTime.Unix() + } + + conditions = append(conditions, c) + + conditionTag := createConditionTag(string(condition.Type), string(condition.Status)) + conditionTags = append(conditionTags, conditionTag) + } + + return conditions, conditionTags +} diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset_test.go index feeb4fbb24aab..c5ab710aeb3b8 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset_test.go @@ -15,6 +15,7 @@ import ( "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ) @@ -61,6 +62,15 @@ func TestExtractReplicaSet(t *testing.T) { FullyLabeledReplicas: 2, ReadyReplicas: 1, AvailableReplicas: 1, + Conditions: []appsv1.ReplicaSetCondition{ + { + Type: appsv1.ReplicaSetReplicaFailure, + Status: v1.ConditionFalse, + LastTransitionTime: timestamp, + Reason: "test reason", + Message: "test message", + }, + }, }, }, expected: model.ReplicaSet{ Metadata: 
&model.Metadata{ @@ -72,6 +82,16 @@ func TestExtractReplicaSet(t *testing.T) { Annotations: []string{"annotation:bar"}, ResourceVersion: "1234", }, + Conditions: []*model.ReplicaSetCondition{ + { + Type: string(appsv1.ReplicaSetReplicaFailure), + Status: string(v1.ConditionFalse), + LastTransitionTime: timestamp.Unix(), + Reason: "test reason", + Message: "test message", + }, + }, + Tags: []string{"kube_condition_replicafailure:false"}, Selectors: []*model.LabelSelectorRequirement{ { Key: "app", diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset.go index f002a06bf6e73..13b05a5b6a24d 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset.go @@ -9,6 +9,7 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" v1 "k8s.io/api/apps/v1" @@ -46,8 +47,41 @@ func ExtractStatefulSet(sts *v1.StatefulSet) *model.StatefulSet { statefulSet.Spec.Selectors = extractLabelSelector(sts.Spec.Selector) } + if len(sts.Status.Conditions) > 0 { + sConditions, conditionTags := extractStatefulSetConditions(sts) + statefulSet.Conditions = sConditions + statefulSet.Tags = append(statefulSet.Tags, conditionTags...) + } + statefulSet.Spec.ResourceRequirements = ExtractPodTemplateResourceRequirements(sts.Spec.Template) statefulSet.Tags = append(statefulSet.Tags, transformers.RetrieveUnifiedServiceTags(sts.ObjectMeta.Labels)...) 
return &statefulSet } + +// extractStatefulSetConditions iterates over stateful conditions and returns: +// - the payload representation of those conditions +// - the list of tags that will enable pod filtering by condition +func extractStatefulSetConditions(s *v1.StatefulSet) ([]*model.StatefulSetCondition, []string) { + conditions := make([]*model.StatefulSetCondition, 0, len(s.Status.Conditions)) + conditionTags := make([]string, 0, len(s.Status.Conditions)) + + for _, condition := range s.Status.Conditions { + c := &model.StatefulSetCondition{ + Message: condition.Message, + Reason: condition.Reason, + Status: string(condition.Status), + Type: string(condition.Type), + } + if !condition.LastTransitionTime.IsZero() { + c.LastTransitionTime = condition.LastTransitionTime.Unix() + } + + conditions = append(conditions, c) + + conditionTag := createConditionTag(string(condition.Type), string(condition.Status)) + conditionTags = append(conditionTags, conditionTag) + } + + return conditions, conditionTags +} diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset_test.go index 666332d7f5be9..bb24525b9c22e 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset_test.go @@ -15,6 +15,7 @@ import ( "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ) @@ -57,6 +58,15 @@ func TestExtractStatefulSet(t *testing.T) { }, }, Status: appsv1.StatefulSetStatus{ + Conditions: []appsv1.StatefulSetCondition{ + { + Type: "Test", + Status: v1.ConditionFalse, + LastTransitionTime: timestamp, + Reason: "testing", + Message: "123", + }, + }, ObservedGeneration: 3, ReadyReplicas: 2, Replicas: 2, @@ -72,6 +82,16 @@ func 
TestExtractStatefulSet(t *testing.T) { Annotations: []string{"annotation:bar"}, ResourceVersion: "1234", }, + Conditions: []*model.StatefulSetCondition{ + { + Type: "Test", + Status: string(v1.ConditionFalse), + LastTransitionTime: timestamp.Unix(), + Reason: "testing", + Message: "123", + }, + }, + Tags: []string{"kube_condition_test:false"}, Spec: &model.StatefulSetSpec{ DesiredReplicas: 2, UpdateStrategy: "RollingUpdate", diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler.go index 09c84bd0d335a..6d1ce327aa06c 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler.go @@ -12,6 +12,7 @@ import ( v1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" ) @@ -26,6 +27,14 @@ func ExtractVerticalPodAutoscaler(v *v1.VerticalPodAutoscaler) *model.VerticalPo Spec: extractVerticalPodAutoscalerSpec(&v.Spec), Status: extractVerticalPodAutoscalerStatus(&v.Status), } + + // This is duplicated in status, but this matches other resource pattern + if len(v.Status.Conditions) > 0 { + vpaConditions, conditionTags := extractVerticalPodAutoscalerConditions(v) + m.Conditions = vpaConditions + m.Tags = append(m.Tags, conditionTags...) + } + m.Tags = append(m.Tags, transformers.RetrieveUnifiedServiceTags(v.ObjectMeta.Labels)...) 
return m } @@ -170,3 +179,30 @@ func extractContainerConditions(cr []v1.VerticalPodAutoscalerCondition) []*model } return con } + +// extractVerticalPodAutoscalerConditions iterates over vpa conditions and returns: +// - the payload representation of those conditions +// - the list of tags that will enable pod filtering by condition +func extractVerticalPodAutoscalerConditions(p *v1.VerticalPodAutoscaler) ([]*model.VerticalPodAutoscalerCondition, []string) { + conditions := make([]*model.VerticalPodAutoscalerCondition, 0, len(p.Status.Conditions)) + conditionTags := make([]string, 0, len(p.Status.Conditions)) + + for _, condition := range p.Status.Conditions { + c := &model.VerticalPodAutoscalerCondition{ + Message: condition.Message, + Reason: condition.Reason, + Status: string(condition.Status), + Type: string(condition.Type), + } + if !condition.LastTransitionTime.IsZero() { + c.LastTransitionTime = condition.LastTransitionTime.Unix() + } + + conditions = append(conditions, c) + + conditionTag := createConditionTag(string(condition.Type), string(condition.Status)) + conditionTags = append(conditionTags, conditionTag) + } + + return conditions, conditionTags +} diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler_test.go index 3a3725d94ffe8..6054c58092b67 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler_test.go @@ -194,6 +194,24 @@ func TestExtractVerticalPodAutoscaler(t *testing.T) { }, }, }, + Conditions: []*model.VerticalPodAutoscalerCondition{ + { + Type: string(v1.RecommendationProvided), + Status: string(corev1.ConditionTrue), + LastTransitionTime: exampleTime.Unix(), + }, + { + Type: string(v1.NoPodsMatched), + Status: string(corev1.ConditionTrue), + LastTransitionTime: 
exampleTime.Unix(), + Reason: "NoPodsMatched", + Message: "No pods match this VPA object", + }, + }, + Tags: []string{ + "kube_condition_recommendationprovided:true", + "kube_condition_nopodsmatched:true", + }, }, }, "minimum-required": { diff --git a/pkg/collector/corechecks/containerimage/check.go b/pkg/collector/corechecks/containerimage/check.go index fe181a0a41c45..71b762f67345a 100644 --- a/pkg/collector/corechecks/containerimage/check.go +++ b/pkg/collector/corechecks/containerimage/check.go @@ -6,6 +6,7 @@ package containerimage import ( + "errors" "time" yaml "gopkg.in/yaml.v2" @@ -13,6 +14,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" + ddConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/workloadmeta" ) @@ -101,6 +103,10 @@ func CheckFactory() check.Check { // Configure parses the check configuration and initializes the container_image check func (c *Check) Configure(integrationConfigDigest uint64, config, initConfig integration.Data, source string) error { + if !ddConfig.Datadog.GetBool("container_image.enabled") { + return errors.New("collection of container images is disabled") + } + if err := c.CommonConfigure(integrationConfigDigest, initConfig, config, source); err != nil { return err } diff --git a/pkg/collector/corechecks/containerlifecycle/check.go b/pkg/collector/corechecks/containerlifecycle/check.go index 5c95aec0c4ed6..ca4bfb21cafd9 100644 --- a/pkg/collector/corechecks/containerlifecycle/check.go +++ b/pkg/collector/corechecks/containerlifecycle/check.go @@ -7,6 +7,7 @@ package containerlifecycle import ( "context" + "errors" "time" yaml "gopkg.in/yaml.v2" @@ -14,6 +15,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/collector/check" 
core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" + ddConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/workloadmeta" ) @@ -50,6 +52,10 @@ type Check struct { // Configure parses the check configuration and initializes the container_lifecycle check func (c *Check) Configure(integrationConfigDigest uint64, config, initConfig integration.Data, source string) error { + if !ddConfig.Datadog.GetBool("container_lifecycle.enabled") { + return errors.New("collection of container lifecycle events is disabled") + } + var err error err = c.CommonConfigure(integrationConfigDigest, initConfig, config, source) diff --git a/pkg/collector/corechecks/containers/containerd/adapter.go b/pkg/collector/corechecks/containers/containerd/adapter.go index d5bc193035efe..f3abf32cf02b6 100644 --- a/pkg/collector/corechecks/containers/containerd/adapter.go +++ b/pkg/collector/corechecks/containers/containerd/adapter.go @@ -27,7 +27,7 @@ var metricsNameMapping = map[string]string{ "container.memory.cache": "containerd.mem.cache", "container.memory.swap": "containerd.mem.swap.usage", "container.memory.oom_events": "containerd.mem.current.failcnt", - "container.memory.working_set": "containerd.mem.private_working_set", + "container.memory.working_set": "containerd.mem.working_set", "container.memory.commit": "containerd.mem.commit", "container.memory.commit.peak": "containerd.mem.commit_peak", "container.io.read": "", // Remapping requires retagging, handled in extension diff --git a/pkg/collector/corechecks/containers/containerd/check_test.go b/pkg/collector/corechecks/containers/containerd/check_test.go index 58f1e9f8e2dd6..274956eaf5581 100644 --- a/pkg/collector/corechecks/containers/containerd/check_test.go +++ b/pkg/collector/corechecks/containers/containerd/check_test.go @@ -49,7 +49,7 @@ func TestContainerdCheckGenericPart(t *testing.T) { expectedTags := []string{"runtime:containerd"} 
mockSender.AssertNumberOfCalls(t, "Rate", 13) - mockSender.AssertNumberOfCalls(t, "Gauge", 10) + mockSender.AssertNumberOfCalls(t, "Gauge", 11) mockSender.AssertMetricInRange(t, "Gauge", "containerd.uptime", 0, 600, "", expectedTags) mockSender.AssertMetric(t, "Rate", "containerd.cpu.total", 100, "", expectedTags) @@ -63,6 +63,7 @@ func TestContainerdCheckGenericPart(t *testing.T) { mockSender.AssertMetric(t, "Gauge", "containerd.mem.kernel.usage", 40, "", expectedTags) mockSender.AssertMetric(t, "Gauge", "containerd.mem.current.limit", 42000, "", expectedTags) mockSender.AssertMetric(t, "Gauge", "containerd.mem.rss", 300, "", expectedTags) + mockSender.AssertMetric(t, "Gauge", "containerd.mem.working_set", 350, "", expectedTags) mockSender.AssertMetric(t, "Gauge", "containerd.mem.cache", 200, "", expectedTags) mockSender.AssertMetric(t, "Gauge", "containerd.mem.swap.usage", 0, "", expectedTags) mockSender.AssertMetric(t, "Gauge", "containerd.mem.current.failcnt", 10, "", expectedTags) diff --git a/pkg/collector/corechecks/containers/docker/adapter.go b/pkg/collector/corechecks/containers/docker/adapter.go index 67f749d18f440..3b535aeb957ac 100644 --- a/pkg/collector/corechecks/containers/docker/adapter.go +++ b/pkg/collector/corechecks/containers/docker/adapter.go @@ -28,7 +28,7 @@ var metricsNameMapping = map[string]string{ "container.memory.cache": "docker.mem.cache", "container.memory.swap": "docker.mem.swap", "container.memory.oom_events": "docker.mem.failed_count", - "container.memory.working_set": "docker.mem.private_working_set", + "container.memory.working_set": "docker.mem.working_set", "container.memory.commit": "docker.mem.commit_bytes", "container.memory.commit.peak": "docker.mem.commit_peak_bytes", "container.io.read": "docker.io.read_bytes", diff --git a/pkg/collector/corechecks/containers/docker/check_test.go b/pkg/collector/corechecks/containers/docker/check_test.go index 0941f580fbe95..cacb3b027fd28 100644 --- 
a/pkg/collector/corechecks/containers/docker/check_test.go +++ b/pkg/collector/corechecks/containers/docker/check_test.go @@ -64,7 +64,7 @@ func TestDockerCheckGenericPart(t *testing.T) { expectedTags := []string{"runtime:docker"} mockSender.AssertNumberOfCalls(t, "Rate", 13) - mockSender.AssertNumberOfCalls(t, "Gauge", 15) + mockSender.AssertNumberOfCalls(t, "Gauge", 16) mockSender.AssertMetricInRange(t, "Gauge", "docker.uptime", 0, 600, "", expectedTags) mockSender.AssertMetric(t, "Rate", "docker.cpu.usage", 1e-5, "", expectedTags) @@ -80,6 +80,7 @@ func TestDockerCheckGenericPart(t *testing.T) { mockSender.AssertMetric(t, "Gauge", "docker.mem.soft_limit", 40000, "", expectedTags) mockSender.AssertMetric(t, "Gauge", "docker.mem.rss", 300, "", expectedTags) mockSender.AssertMetric(t, "Gauge", "docker.mem.cache", 200, "", expectedTags) + mockSender.AssertMetric(t, "Gauge", "docker.mem.working_set", 350, "", expectedTags) mockSender.AssertMetric(t, "Gauge", "docker.mem.swap", 0, "", expectedTags) mockSender.AssertMetric(t, "Gauge", "docker.mem.failed_count", 10, "", expectedTags) mockSender.AssertMetricInRange(t, "Gauge", "docker.mem.in_use", 0, 1, "", expectedTags) diff --git a/pkg/collector/corechecks/containers/generic/processor.go b/pkg/collector/corechecks/containers/generic/processor.go index 71b64a6da2844..4d71a6f332517 100644 --- a/pkg/collector/corechecks/containers/generic/processor.go +++ b/pkg/collector/corechecks/containers/generic/processor.go @@ -165,7 +165,8 @@ func (p *Processor) processContainer(sender aggregator.Sender, tags []string, co p.sendMetric(sender.Gauge, "container.memory.cache", containerStats.Memory.Cache, tags) p.sendMetric(sender.Gauge, "container.memory.swap", containerStats.Memory.Swap, tags) p.sendMetric(sender.Gauge, "container.memory.oom_events", containerStats.Memory.OOMEvents, tags) - p.sendMetric(sender.Gauge, "container.memory.working_set", containerStats.Memory.PrivateWorkingSet, tags) + p.sendMetric(sender.Gauge, 
"container.memory.working_set", containerStats.Memory.WorkingSet, tags) // Linux + p.sendMetric(sender.Gauge, "container.memory.working_set", containerStats.Memory.PrivateWorkingSet, tags) // Windows p.sendMetric(sender.Gauge, "container.memory.commit", containerStats.Memory.CommitBytes, tags) p.sendMetric(sender.Gauge, "container.memory.commit.peak", containerStats.Memory.CommitPeakBytes, tags) p.sendMetric(sender.Rate, "container.memory.partial_stall", containerStats.Memory.PartialStallTime, tags) diff --git a/pkg/collector/corechecks/containers/generic/processor_test.go b/pkg/collector/corechecks/containers/generic/processor_test.go index ebd1482a48463..ff0602b32ba43 100644 --- a/pkg/collector/corechecks/containers/generic/processor_test.go +++ b/pkg/collector/corechecks/containers/generic/processor_test.go @@ -37,7 +37,7 @@ func TestProcessorRunFullStatsLinux(t *testing.T) { expectedTags := []string{"runtime:docker"} mockSender.AssertNumberOfCalls(t, "Rate", 20) - mockSender.AssertNumberOfCalls(t, "Gauge", 14) + mockSender.AssertNumberOfCalls(t, "Gauge", 15) mockSender.AssertMetricInRange(t, "Gauge", "container.uptime", 0, 600, "", expectedTags) mockSender.AssertMetric(t, "Rate", "container.cpu.usage", 100, "", expectedTags) @@ -54,6 +54,7 @@ func TestProcessorRunFullStatsLinux(t *testing.T) { mockSender.AssertMetric(t, "Gauge", "container.memory.soft_limit", 40000, "", expectedTags) mockSender.AssertMetric(t, "Gauge", "container.memory.rss", 300, "", expectedTags) mockSender.AssertMetric(t, "Gauge", "container.memory.cache", 200, "", expectedTags) + mockSender.AssertMetric(t, "Gauge", "container.memory.working_set", 350, "", expectedTags) mockSender.AssertMetric(t, "Gauge", "container.memory.swap", 0, "", expectedTags) mockSender.AssertMetric(t, "Gauge", "container.memory.oom_events", 10, "", expectedTags) mockSender.AssertMetric(t, "Rate", "container.memory.partial_stall", 97000, "", expectedTags) diff --git a/pkg/collector/corechecks/ebpf/probe/oom_kill.go 
b/pkg/collector/corechecks/ebpf/probe/oom_kill.go index 2b324bd2b43d3..70c8a21f78459 100644 --- a/pkg/collector/corechecks/ebpf/probe/oom_kill.go +++ b/pkg/collector/corechecks/ebpf/probe/oom_kill.go @@ -146,16 +146,18 @@ func (k *OOMKillProbe) GetAndFlush() (results []OOMKillStats) { it := k.oomMap.Iterate() for it.Next(unsafe.Pointer(&pid), unsafe.Pointer(&stat)) { results = append(results, convertStats(stat)) - - if err := k.oomMap.Delete(unsafe.Pointer(&pid)); err != nil { - log.Warnf("failed to delete stat: %s", err) - } } if err := it.Err(); err != nil { log.Warnf("failed to iterate on OOM stats while flushing: %s", err) } + for _, r := range results { + if err := k.oomMap.Delete(unsafe.Pointer(&r.Pid)); err != nil { + log.Warnf("failed to delete stat: %s", err) + } + } + return results } diff --git a/pkg/collector/corechecks/ebpf/probe/oom_kill_test.go b/pkg/collector/corechecks/ebpf/probe/oom_kill_test.go index 417d16262bc52..a74b5a7e5bcd1 100644 --- a/pkg/collector/corechecks/ebpf/probe/oom_kill_test.go +++ b/pkg/collector/corechecks/ebpf/probe/oom_kill_test.go @@ -8,8 +8,7 @@ package probe import ( - "fmt" - "os" + "context" "os/exec" "regexp" "syscall" @@ -22,117 +21,88 @@ import ( "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode/runtime" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/process/statsd" "github.com/DataDog/datadog-agent/pkg/util/kernel" ) -const oomKilledPython = ` -l = [] -while True: - l.append("." 
* (1024 * 1024)) -` - -const oomKilledBashScript = ` -exec systemd-run --scope -p MemoryLimit=1M python3 %v # replace shell, so that the process launched by Go is the one getting oom-killed -` - -func writeTempFile(pattern string, content string) (*os.File, error) { - f, err := os.CreateTemp("", pattern) - if err != nil { - return nil, err - } - defer f.Close() - - if _, err := f.WriteString(content); err != nil { - return nil, err - } - - return f, nil -} +var kv = kernel.MustHostVersion() func TestOOMKillCompile(t *testing.T) { - kv, err := kernel.HostVersion() - if err != nil { - t.Fatal(err) - } - if kv < kernel.VersionCode(4, 9, 0) { - t.Skipf("Kernel version %v is not supported by the OOM probe", kv) - } - - cfg := testConfig() - cfg.BPFDebug = true - out, err := runtime.OomKill.Compile(cfg, []string{"-g"}, statsd.Client) - require.NoError(t, err) - _ = out.Close() + ebpftest.TestBuildMode(t, ebpftest.RuntimeCompiled, "", func(t *testing.T) { + if kv < kernel.VersionCode(4, 9, 0) { + t.Skipf("Kernel version %v is not supported by the OOM probe", kv) + } + + cfg := testConfig() + cfg.BPFDebug = true + out, err := runtime.OomKill.Compile(cfg, []string{"-g"}, statsd.Client) + require.NoError(t, err) + _ = out.Close() + }) } func TestOOMKillProbe(t *testing.T) { - kv, err := kernel.HostVersion() - if err != nil { - t.Fatal(err) - } - if kv < kernel.VersionCode(4, 9, 0) { - t.Skipf("Kernel version %v is not supported by the OOM probe", kv) - } - - cfg := testConfig() - oomKillProbe, err := NewOOMKillProbe(cfg) - if err != nil { - t.Fatal(err) - } - defer oomKillProbe.Close() - - pf, err := writeTempFile("oom-kill-py", oomKilledPython) - if err != nil { - t.Fatal(err) - } - defer os.Remove(pf.Name()) - - bf, err := writeTempFile("oom-trigger-sh", fmt.Sprintf(oomKilledBashScript, pf.Name())) - if err != nil { - t.Fatal(err) - } - defer os.Remove(bf.Name()) - - cmd := exec.Command("bash", bf.Name()) - - oomKilled := false - if err := cmd.Run(); err != nil { - if 
exiterr, ok := err.(*exec.ExitError); ok { - if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { - if (status.Signaled() && status.Signal() == unix.SIGKILL) || status.ExitStatus() == 137 { - oomKilled = true - } - } + ebpftest.TestBuildModes(t, []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE}, "", func(t *testing.T) { + if kv < kernel.VersionCode(4, 9, 0) { + t.Skipf("Kernel version %v is not supported by the OOM probe", kv) } - if !oomKilled { - output, _ := cmd.CombinedOutput() - t.Fatalf("expected process to be killed: %s (output: %s)", err, string(output)) + cfg := testConfig() + oomKillProbe, err := NewOOMKillProbe(cfg) + if err != nil { + t.Fatal(err) } - } - - time.Sleep(3 * time.Second) - - found := false - results := oomKillProbe.GetAndFlush() - for _, result := range results { - if result.TPid == uint32(cmd.Process.Pid) { - found = true - - assert.Regexp(t, regexp.MustCompile("run-([0-9|a-z]*).scope"), result.CgroupName, "cgroup name") - assert.Equal(t, result.TPid, result.Pid, "tpid == pid") - assert.Equal(t, "python3", result.FComm, "fcomm") - assert.Equal(t, "python3", result.TComm, "tcomm") - assert.NotZero(t, result.Pages, "pages") - assert.Equal(t, uint32(1), result.MemCgOOM, "memcg oom") - break + t.Cleanup(oomKillProbe.Close) + + t.Cleanup(func() { + out, err := exec.Command("swapon", "-a").CombinedOutput() + if err != nil { + t.Logf("swapon -a: %s: %s", err, out) + } + }) + require.NoError(t, exec.Command("swapoff", "-a").Run()) + + ctx, cancel := context.WithTimeout(context.Background(), 4*time.Minute) + t.Cleanup(cancel) + + cmd := exec.CommandContext(ctx, "systemd-run", "--scope", "-p", "MemoryLimit=1M", "dd", "if=/dev/zero", "of=/dev/shm/asdf", "bs=1K", "count=2K") + obytes, err := cmd.CombinedOutput() + output := string(obytes) + require.Error(t, err) + require.NotErrorIs(t, err, context.DeadlineExceeded) + + var exiterr *exec.ExitError + require.ErrorAs(t, err, &exiterr, output) + var status syscall.WaitStatus + + status, 
sok := exiterr.Sys().(syscall.WaitStatus) + require.True(t, sok, output) + + if status.Signaled() { + require.Equal(t, unix.SIGKILL, status.Signal(), output) + } else { + require.Equal(t, 128+unix.SIGKILL, status.ExitStatus(), output) } - } - if !found { - t.Errorf("failed to find an OOM killed process with pid %d in %+v", cmd.Process.Pid, results) - } + var result OOMKillStats + require.Eventually(t, func() bool { + for _, r := range oomKillProbe.GetAndFlush() { + if r.TPid == uint32(cmd.Process.Pid) { + result = r + return true + } + } + return false + }, 5*time.Second, 500*time.Millisecond, "failed to find an OOM killed process with pid %d", cmd.Process.Pid) + + assert.Regexp(t, regexp.MustCompile("run-([0-9|a-z]*).scope"), result.CgroupName, "cgroup name") + assert.Equal(t, result.TPid, result.Pid, "tpid == pid") + assert.Equal(t, "dd", result.FComm, "fcomm") + assert.Equal(t, "dd", result.TComm, "tcomm") + assert.NotZero(t, result.Pages, "pages") + assert.Equal(t, uint32(1), result.MemCgOOM, "memcg oom") + }) } func testConfig() *ebpf.Config { diff --git a/pkg/collector/corechecks/ebpf/probe/tcp_queue_length_test.go b/pkg/collector/corechecks/ebpf/probe/tcp_queue_length_test.go index ded86c8c388cb..9af694cfdec8b 100644 --- a/pkg/collector/corechecks/ebpf/probe/tcp_queue_length_test.go +++ b/pkg/collector/corechecks/ebpf/probe/tcp_queue_length_test.go @@ -9,62 +9,60 @@ package probe import ( "net" - "sync" "testing" "time" "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode/runtime" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/process/statsd" "github.com/DataDog/datadog-agent/pkg/util/kernel" ) func TestTCPQueueLengthCompile(t *testing.T) { - kv, err := kernel.HostVersion() - if err != nil { - t.Fatal(err) - } - if kv < kernel.VersionCode(4, 8, 0) { - t.Skipf("Kernel version %v is not supported by the 
OOM probe", kv) - } + ebpftest.TestBuildMode(t, ebpftest.RuntimeCompiled, "", func(t *testing.T) { + if kv < kernel.VersionCode(4, 8, 0) { + t.Skipf("Kernel version %v is not supported by the TCP Queue Length probe", kv) + } - cfg := ebpf.NewConfig() - cfg.BPFDebug = true - out, err := runtime.TcpQueueLength.Compile(cfg, []string{"-g"}, statsd.Client) - require.NoError(t, err) - _ = out.Close() + cfg := ebpf.NewConfig() + cfg.BPFDebug = true + out, err := runtime.TcpQueueLength.Compile(cfg, []string{"-g"}, statsd.Client) + require.NoError(t, err) + _ = out.Close() + }) } func TestTCPQueueLengthTracer(t *testing.T) { - kv, err := kernel.HostVersion() - if err != nil { - t.Fatal(err) - } - if kv < kernel.VersionCode(4, 8, 0) { - t.Skipf("Kernel version %v is not supported by the OOM probe", kv) - } - - cfg := ebpf.NewConfig() - tcpTracer, err := NewTCPQueueLengthTracer(cfg) - if err != nil { - t.Fatal(err) - } + ebpftest.TestBuildModes(t, []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE}, "", func(t *testing.T) { + if kv < kernel.VersionCode(4, 8, 0) { + t.Skipf("Kernel version %v is not supported by the OOM probe", kv) + } - beforeStats := extractGlobalStats(t, tcpTracer) - if beforeStats.ReadBufferMaxUsage > 10 { - t.Errorf("max usage of read buffer is too big before the stress test: %d > 10", beforeStats.ReadBufferMaxUsage) - } + cfg := ebpf.NewConfig() + tcpTracer, err := NewTCPQueueLengthTracer(cfg) + require.NoError(t, err) + t.Cleanup(tcpTracer.Close) - runTCPLoadTest() + beforeStats := extractGlobalStats(t, tcpTracer) + if beforeStats.ReadBufferMaxUsage > 10 { + t.Errorf("max usage of read buffer is too big before the stress test: %d > 10", beforeStats.ReadBufferMaxUsage) + } - afterStats := extractGlobalStats(t, tcpTracer) - if afterStats.ReadBufferMaxUsage < 1000 { - t.Errorf("max usage of read buffer is too low after the stress test: %d < 1000", afterStats.ReadBufferMaxUsage) - } + err = runTCPLoadTest() + require.NoError(t, err) + if total != 
msgLen { + require.Equal(t, msgLen, total, "message length") + } - defer tcpTracer.Close() + afterStats := extractGlobalStats(t, tcpTracer) + if afterStats.ReadBufferMaxUsage < 1000 { + t.Errorf("max usage of read buffer is too low after the stress test: %d < 1000", afterStats.ReadBufferMaxUsage) + } + }) } func extractGlobalStats(t *testing.T, tracer *TCPQueueLengthTracer) TCPQueueLengthStatsValue { @@ -100,16 +98,16 @@ var Addr *net.TCPAddr = &net.TCPAddr{ Port: 25568, } +const msgLen = 10000 + var ( - isInSlowMode = true - wg sync.WaitGroup - serverReadyLock sync.Mutex - serverReadyCond = sync.NewCond(&serverReadyLock) + isInSlowMode = true + total int + serverReady chan struct{} ) func handleRequest(conn *net.TCPConn) error { - defer wg.Done() - total := 0 + defer conn.Close() outer: for { buf := make([]byte, 10) @@ -131,7 +129,6 @@ outer: } } - conn.Close() return nil } @@ -142,10 +139,9 @@ func server() error { } defer listener.Close() - serverReadyCond.Broadcast() + close(serverReady) conn, err := listener.AcceptTCP() - if err != nil { return err } @@ -155,10 +151,7 @@ func server() error { } func client() error { - defer wg.Done() - const msgLen = 10000 - - serverReadyCond.Wait() + <-serverReady conn, err := net.DialTCP("tcp", nil, Addr) if err != nil { @@ -178,11 +171,12 @@ func client() error { return nil } -func runTCPLoadTest() { - serverReadyLock.Lock() +func runTCPLoadTest() error { + serverReady = make(chan struct{}) + total = 0 - wg.Add(2) - go server() - go client() - wg.Wait() + g := new(errgroup.Group) + g.Go(server) + g.Go(client) + return g.Wait() } diff --git a/pkg/collector/corechecks/oracle-dbm/config/config.go b/pkg/collector/corechecks/oracle-dbm/config/config.go index 8cd31e46384e4..930bd9ba826f4 100644 --- a/pkg/collector/corechecks/oracle-dbm/config/config.go +++ b/pkg/collector/corechecks/oracle-dbm/config/config.go @@ -31,26 +31,51 @@ type QueryMetricsConfig struct { IncludeDatadogQueries bool `yaml:"include_datadog_queries"` } +type 
SysMetricsConfig struct { + Enabled bool `yaml:"enabled"` +} + +type TablespacesConfig struct { + Enabled bool `yaml:"enabled"` +} + +type ProcessMemoryConfig struct { + Enabled bool `yaml:"enabled"` +} + +type ExecutionPlansConfig struct { + Enabled bool `yaml:"enabled"` +} + +type AgentSQLTrace struct { + Enabled bool `yaml:"enabled"` + Binds bool `yaml:"binds"` + Waits bool `yaml:"waits"` + TracedRuns int `yaml:"traced_runs"` +} + // InstanceConfig is used to deserialize integration instance config. type InstanceConfig struct { - Server string `yaml:"server"` - Port int `yaml:"port"` - ServiceName string `yaml:"service_name"` - Username string `yaml:"username"` - Password string `yaml:"password"` - TnsAlias string `yaml:"tns_alias"` - TnsAdmin string `yaml:"tns_admin"` - DBM bool `yaml:"dbm"` - Tags []string `yaml:"tags"` - LogUnobfuscatedQueries bool `yaml:"log_unobfuscated_queries"` - ObfuscatorOptions obfuscate.SQLConfig `yaml:"obfuscator_options"` - InstantClient bool `yaml:"instant_client"` - ReportedHostname string `yaml:"reported_hostname"` - QuerySamples QuerySamplesConfig `yaml:"query_samples"` - QueryMetrics QueryMetricsConfig `yaml:"query_metrics"` - CollectSysMetrics bool `yaml:"collect_sysmetrics"` - CollectTablespaces bool `yaml:"collect_tablespaces"` - CollectProcessMemory bool `yaml:"collect_process_memory"` + Server string `yaml:"server"` + Port int `yaml:"port"` + ServiceName string `yaml:"service_name"` + Username string `yaml:"username"` + Password string `yaml:"password"` + TnsAlias string `yaml:"tns_alias"` + TnsAdmin string `yaml:"tns_admin"` + DBM bool `yaml:"dbm"` + Tags []string `yaml:"tags"` + LogUnobfuscatedQueries bool `yaml:"log_unobfuscated_queries"` + ObfuscatorOptions obfuscate.SQLConfig `yaml:"obfuscator_options"` + InstantClient bool `yaml:"instant_client"` + ReportedHostname string `yaml:"reported_hostname"` + QuerySamples QuerySamplesConfig `yaml:"query_samples"` + QueryMetrics QueryMetricsConfig `yaml:"query_metrics"` + 
SysMetrics SysMetricsConfig `yaml:"sysmetrics"` + Tablespaces TablespacesConfig `yaml:"tablespaces"` + ProcessMemory ProcessMemoryConfig `yaml:"process_memory"` + ExecutionPlans ExecutionPlansConfig `yaml:"execution_plans"` + AgentSQLTrace AgentSQLTrace `yaml:"agent_sql_trace"` } // CheckConfig holds the config needed for an integration instance to run. @@ -82,9 +107,9 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data instance.QuerySamples.Enabled = true instance.QueryMetrics.Enabled = true - instance.CollectSysMetrics = true - instance.CollectTablespaces = true - instance.CollectProcessMemory = true + instance.SysMetrics.Enabled = true + instance.Tablespaces.Enabled = true + instance.ProcessMemory.Enabled = true // Defaults end if err := yaml.Unmarshal(rawInstance, &instance); err != nil { diff --git a/pkg/collector/corechecks/oracle-dbm/oracle.go b/pkg/collector/corechecks/oracle-dbm/oracle.go index 98aa1546de945..8ecfffe5b4e7b 100644 --- a/pkg/collector/corechecks/oracle-dbm/oracle.go +++ b/pkg/collector/corechecks/oracle-dbm/oracle.go @@ -24,6 +24,10 @@ import ( go_ora "github.com/sijms/go-ora/v2" ) +var MAX_OPEN_CONNECTIONS = 10 +var DEFAULT_SQL_TRACED_RUNS = 10 +var DB_TIMEOUT = "20000" + // The structure is filled by activity sampling and serves as a filter for query metrics type StatementsFilter struct { SQLIDs map[string]int @@ -62,6 +66,7 @@ type Check struct { statementsLastRun time.Time filePath string isRDS bool + sqlTraceRunsCount int } // Run executes the check. 
@@ -79,25 +84,26 @@ func (c *Check) Run() error { c.db = db } - if c.dbmEnabled { - if c.config.CollectSysMetrics { - err := c.SysMetrics() - if err != nil { - return err - } + if c.config.SysMetrics.Enabled { + err := c.SysMetrics() + if err != nil { + return err } - if c.config.CollectTablespaces { - err := c.Tablespaces() - if err != nil { - return err - } + } + if c.config.Tablespaces.Enabled { + err := c.Tablespaces() + if err != nil { + return err } - if c.config.CollectProcessMemory { - err := c.ProcessMemory() - if err != nil { - return err - } + } + if c.config.ProcessMemory.Enabled { + err := c.ProcessMemory() + if err != nil { + return err } + } + + if c.dbmEnabled { if c.config.QuerySamples.Enabled { err := c.SampleSession() if err != nil { @@ -110,18 +116,18 @@ func (c *Check) Run() error { } } } + } - if c.config.QuerySamples.Enabled { - err := c.SampleSession() + if c.config.AgentSQLTrace.Enabled { + log.Tracef("Traced runs %d", c.sqlTraceRunsCount) + c.sqlTraceRunsCount++ + if c.sqlTraceRunsCount >= c.config.AgentSQLTrace.TracedRuns { + c.config.AgentSQLTrace.Enabled = false + _, err := c.db.Exec("BEGIN dbms_monitor.session_trace_disable; END;") if err != nil { - return err - } - if c.config.QueryMetrics.Enabled { - _, err = c.StatementMetrics() - if err != nil { - return err - } + log.Errorf("failed to stop SQL trace: %v", err) } + c.db.SetMaxOpenConns(MAX_OPEN_CONNECTIONS) } } return nil @@ -142,7 +148,7 @@ func (c *Check) Connect() (*sqlx.DB, error) { connStr = fmt.Sprintf("%s/%s@%s/%s", c.config.Username, c.config.Password, c.config.Server, c.config.ServiceName) } else { oracleDriver = "oracle" - connStr = go_ora.BuildUrl(c.config.Server, c.config.Port, c.config.ServiceName, c.config.Username, c.config.Password, map[string]string{}) + connStr = go_ora.BuildUrl(c.config.Server, c.config.Port, c.config.ServiceName, c.config.Username, c.config.Password, map[string]string{"TIMEOUT": DB_TIMEOUT}) // 
https://github.com/jmoiron/sqlx/issues/854#issuecomment-1504070464 sqlx.BindDriver("oracle", sqlx.NAMED) } @@ -160,7 +166,7 @@ func (c *Check) Connect() (*sqlx.DB, error) { return nil, fmt.Errorf("failed to ping oracle instance: %w", err) } - db.SetMaxOpenConns(10) + db.SetMaxOpenConns(MAX_OPEN_CONNECTIONS) if c.cdbName == "" { row := db.QueryRow("SELECT /* DD */ name FROM v$database") @@ -199,9 +205,37 @@ func (c *Check) Connect() (*sqlx.DB, error) { } } + if c.config.AgentSQLTrace.Enabled { + db.SetMaxOpenConns(1) + _, err := db.Exec("ALTER SESSION SET tracefile_identifier='DDAGENT'") + if err != nil { + log.Warnf("failed to set tracefile_identifier: %v", err) + } + + /* We are concatenating values instead of passing parameters, because there seems to be a problem + * in go-ora with passing bool parameters to PL/SQL. As a mitigation, we are asserting that the + * parameters are bool + */ + binds := assertBool(c.config.AgentSQLTrace.Binds) + waits := assertBool(c.config.AgentSQLTrace.Waits) + setEventsStatement := fmt.Sprintf("BEGIN dbms_monitor.session_trace_enable (binds => %t, waits => %t); END;", binds, waits) + log.Trace("trace statement: %s", setEventsStatement) + _, err = db.Exec(setEventsStatement) + if err != nil { + log.Errorf("failed to set SQL trace: %v", err) + } + if c.config.AgentSQLTrace.TracedRuns == 0 { + c.config.AgentSQLTrace.TracedRuns = DEFAULT_SQL_TRACED_RUNS + } + } + return db, nil } +func assertBool(val bool) bool { + return val +} + // Teardown cleans up resources used throughout the check. 
func (c *Check) Teardown() { if c.db != nil { diff --git a/pkg/collector/corechecks/oracle-dbm/oracle_test.go b/pkg/collector/corechecks/oracle-dbm/oracle_test.go index 8c39857ef58da..18ce18276804a 100644 --- a/pkg/collector/corechecks/oracle-dbm/oracle_test.go +++ b/pkg/collector/corechecks/oracle-dbm/oracle_test.go @@ -127,8 +127,6 @@ func TestChkRun(t *testing.T) { initAndStartAgentDemultiplexer() chk.dbmEnabled = true - //chk.config.QueryMetrics = true - chk.config.InstanceConfig.InstantClient = false type RowsStruct struct { @@ -167,7 +165,8 @@ func TestLicense(t *testing.T) { if err != nil { fmt.Printf("failed to ping oracle instance: %s", err) } - row := db.QueryRow(`SELECT SUM(detected_usages) + var usedFeaturesCount int + err = db.Get(&usedFeaturesCount, `SELECT NVL(SUM(detected_usages),0) FROM dba_feature_usage_statistics WHERE name in ( 'ADDM', @@ -185,10 +184,9 @@ func TestLicense(t *testing.T) { 'SQL Tuning Set (user)' ) `) - var usedFeaturesCount int - err = row.Scan(&usedFeaturesCount) + //err = row.Scan(&usedFeaturesCount) if err != nil { - fmt.Printf("failed to query hostname and version: %s", err) + fmt.Printf("failed to query license info: %s", err) } assert.Equal(t, 0, usedFeaturesCount) } diff --git a/pkg/collector/corechecks/oracle-dbm/sql/setup.sql b/pkg/collector/corechecks/oracle-dbm/sql/setup.sql index 7631f34c820d6..541a10c51f534 100644 --- a/pkg/collector/corechecks/oracle-dbm/sql/setup.sql +++ b/pkg/collector/corechecks/oracle-dbm/sql/setup.sql @@ -12,6 +12,7 @@ grant select on v_$database to c##datadog ; grant select on v_$containers to c##datadog; grant select on v_$sqlstats to c##datadog ; grant select on v_$instance to c##datadog ; +grant select on v_$sql_plan_statistics_all to c##datadog ; grant select on dba_feature_usage_statistics to c##datadog ; grant select on v_$datafile to c##datadog ; grant select on v_$con_sysmetric to c##datadog ; diff --git a/pkg/collector/corechecks/oracle-dbm/statements.go 
b/pkg/collector/corechecks/oracle-dbm/statements.go index d6da3f214f0a9..65813ab085d2c 100644 --- a/pkg/collector/corechecks/oracle-dbm/statements.go +++ b/pkg/collector/corechecks/oracle-dbm/statements.go @@ -6,6 +6,7 @@ package oracle import ( + "database/sql" "encoding/json" "fmt" "math" @@ -23,7 +24,7 @@ import ( const STATEMENT_METRICS_QUERY = `SELECT /* DD */ c.name as pdb_name, %s, - plan_hash_value, + plan_hash_value, sum(parse_calls) as parse_calls, sum(disk_reads) as disk_reads, sum(direct_writes) as direct_writes, @@ -64,6 +65,48 @@ WHERE AND %s IN (%s) %s GROUP BY c.name, %s, plan_hash_value` +const PLAN_QUERY = `SELECT /* DD */ + timestamp, + operation, + options, + object_name, + object_type, + object_alias, + optimizer, + id, + parent_id, + depth, + position, + search_columns, + cost, + cardinality, + bytes, + partition_start, + partition_stop, + other, + cpu_cost, + io_cost, + temp_space, + access_predicates, + filter_predicates, + projection, + executions, + last_starts, + last_output_rows, + last_cr_buffer_gets, + last_disk_reads, + last_disk_writes, + last_elapsed_time, + last_memory_used, + last_degree, + last_tempseg_size, + c.name pdb_name +FROM v$sql_plan_statistics_all s, v$containers c +WHERE + child_address = ( SELECT last_active_child_address FROM v$sqlstats WHERE plan_hash_value = :1 ORDER BY last_active_time DESC FETCH FIRST 1 ROW ONLY) + AND s.con_id = c.con_id(+) +ORDER BY id, position` + type StatementMetricsKeyDB struct { PDBName string `db:"PDB_NAME"` SQLID string `db:"SQL_ID"` @@ -123,6 +166,7 @@ type QueryRow struct { QuerySignature string `json:"query_signature,omitempty" dbm:"query_signature,primary"` Tables []string `json:"dd_tables,omitempty" dbm:"table,tag"` Commands []string `json:"dd_commands,omitempty" dbm:"command,tag"` + Comments []string `json:"dd_comments,omitempty" dbm:"comments,tag"` } type OracleRowMonotonicCount struct { @@ -222,6 +266,126 @@ type FQTPayload struct { FQTDBOracle FQTDBOracle `json:"oracle"` } +type 
OraclePlan struct { + PlanHashValue uint64 `json:"plan_hash_value,omitempty"` + SQLID string `json:"sql_id,omitempty"` + Timestamp string `json:"created,omitempty"` + OptimizerMode string `json:"optimizer_mode,omitempty"` + Other string `json:"other"` + PDBName string `json:"pdb_name"` +} + +type PlanStatementMetadata struct { + Tables []string `json:"tables"` + Commands []string `json:"commands"` + Comments []string `json:"comments"` +} + +type PlanDefinition struct { + Operation string `json:"operation,omitempty"` + Options string `json:"options,omitempty"` + ObjectOwner string `json:"object_owner,omitempty"` + ObjectName string `json:"object_name,omitempty"` + ObjectAlias string `json:"object_alias,omitempty"` + ObjectType string `json:"object_type,omitempty"` + PlanStepId int64 `json:"id,omitempty"` + ParentId int64 `json:"parent_id,omitempty"` + Depth int64 `json:"depth,omitempty"` + Position int64 `json:"position,omitempty"` + SearchColumns int64 `json:"search_columns,omitempty"` + Cost float64 `json:"cost,omitempty"` + Cardinality float64 `json:"cardinality,omitempty"` + Bytes float64 `json:"bytes,omitempty"` + PartitionStart string `json:"partition_start,omitempty"` + PartitionStop string `json:"partition_stop,omitempty"` + CPUCost float64 `json:"cpu_cost,omitempty"` + IOCost float64 `json:"io_cost,omitempty"` + TempSpace float64 `json:"temp_space,omitempty"` + AccessPredicates string `json:"access_predicates,omitempty"` + FilterPredicates string `json:"filter_predicates,omitempty"` + Projection string `json:"projection,omitempty"` + LastStarts uint64 `json:"actual_starts,omitempty"` + LastOutputRows uint64 `json:"actual_rows,omitempty"` + LastCRBufferGets uint64 `json:"actual_cr_buffer_gets,omitempty"` + LastDiskReads uint64 `json:"actual_disk_reads,omitempty"` + LastDiskWrites uint64 `json:"actual_disk_writes,omitempty"` + LastElapsedTime uint64 `json:"actual_elapsed_time,omitempty"` + LastMemoryUsed uint64 `json:"actual_memory_used,omitempty"` + 
LastDegree uint64 `json:"actual_parallel_degree,omitempty"` + LastTempsegSize uint64 `json:"actual_tempseg_size,omitempty"` +} + +type PlanPlanDB struct { + Definition []PlanDefinition `json:"definition"` + Signature string `json:"signature"` +} + +type PlanDB struct { + Instance string `json:"instance,omitempty"` + Plan PlanPlanDB `json:"plan,omitempty"` + QuerySignature string `json:"query_signature,omitempty"` + Statement string `json:"statement,omitempty"` + Metadata PlanStatementMetadata `json:"metadata,omitempty"` +} + +type PlanPayload struct { + Timestamp float64 `json:"timestamp,omitempty"` + Host string `json:"host,omitempty"` // Host is the database hostname, not the agent hostname + AgentVersion string `json:"ddagentversion,omitempty"` + Source string `json:"ddsource"` + Tags string `json:"ddtags,omitempty"` + DBMType string `json:"dbm_type"` + PlanDB PlanDB `json:"db"` + OraclePlan OraclePlan `json:"oracle"` +} + +type PlanGlobalRow struct { + SQLID string `db:"SQL_ID"` + ChildNumber sql.NullInt64 `db:"CHILD_NUMBER"` + PlanCreated sql.NullString `db:"TIMESTAMP"` + OptimizerMode sql.NullString `db:"OPTIMIZER"` + Other sql.NullString `db:"OTHER"` + Executions sql.NullString `db:"EXECUTIONS"` + PDBName sql.NullString `db:"PDB_NAME"` +} +type PlanStepRows struct { + Operation sql.NullString `db:"OPERATION"` + Options sql.NullString `db:"OPTIONS"` + ObjectOwner sql.NullString `db:"OBJECT_OWNER"` + ObjectName sql.NullString `db:"OBJECT_NAME"` + ObjectAlias sql.NullString `db:"OBJECT_ALIAS"` + ObjectType sql.NullString `db:"OBJECT_TYPE"` + PlanStepId sql.NullInt64 `db:"ID"` + ParentId sql.NullInt64 `db:"PARENT_ID"` + Depth sql.NullInt64 `db:"DEPTH"` + Position sql.NullInt64 `db:"POSITION"` + SearchColumns sql.NullInt64 `db:"SEARCH_COLUMNS"` + Cost sql.NullFloat64 `db:"COST"` + Cardinality sql.NullFloat64 `db:"CARDINALITY"` + Bytes sql.NullFloat64 `db:"BYTES"` + PartitionStart sql.NullString `db:"PARTITION_START"` + PartitionStop sql.NullString 
`db:"PARTITION_STOP"` + CPUCost sql.NullFloat64 `db:"CPU_COST"` + IOCost sql.NullFloat64 `db:"IO_COST"` + TempSpace sql.NullFloat64 `db:"TEMP_SPACE"` + AccessPredicates sql.NullString `db:"ACCESS_PREDICATES"` + FilterPredicates sql.NullString `db:"FILTER_PREDICATES"` + Projection sql.NullString `db:"PROJECTION"` + LastStarts *uint64 `db:"LAST_STARTS"` + LastOutputRows *uint64 `db:"LAST_OUTPUT_ROWS"` + LastCRBufferGets *uint64 `db:"LAST_CR_BUFFER_GETS"` + LastDiskReads *uint64 `db:"LAST_DISK_READS"` + LastDiskWrites *uint64 `db:"LAST_DISK_WRITES"` + LastElapsedTime *uint64 `db:"LAST_ELAPSED_TIME"` + LastMemoryUsed *uint64 `db:"LAST_MEMORY_USED"` + LastDegree *uint64 `db:"LAST_DEGREE"` + LastTempsegSize *uint64 `db:"LAST_TEMPSEG_SIZE"` +} +type PlanRows struct { + PlanGlobalRow + PlanStepRows +} + func ConstructStatementMetricsQueryBlock(sqlHandleColumn string, whereClause string, bindPlaceholder string) string { return fmt.Sprintf(STATEMENT_METRICS_QUERY, sqlHandleColumn, sqlHandleColumn, bindPlaceholder, whereClause, sqlHandleColumn) } @@ -270,19 +434,18 @@ func (c *Check) StatementMetrics() (int, error) { SQLCount := 0 totalSQLTextTimeUs := int64(0) var oracleRows []OracleRow - + var planErrors uint16 if c.config.QueryMetrics.Enabled { if c.config.InstanceConfig.QueryMetrics.IncludeDatadogQueries { var DDForceMatchingSignatures []string - /* * When we want to capture the Datadog Agent queries, we're explicitly looking for them in v$sqlstats, because * they are excluded from query samples and therefore won't be found in c.statementsCache */ err = c.db.Select( &DDForceMatchingSignatures, - "SELECT distinct force_matching_signature FROM v$sqlstats WHERE sql_text like '%/* DD%' and (sysdate-last_active_time)*3600*24 < :1", - time.Since(c.statementsLastRun).Seconds(), + "SELECT distinct force_matching_signature FROM v$sqlstats WHERE sql_text like '%/* DD%' and (sysdate-last_active_time)*3600*24 <= :1", + time.Since(c.statementsLastRun).Seconds()+1, ) if err != nil { 
log.Error("error getting sql_ids from DD queries") @@ -324,7 +487,9 @@ func (c *Check) StatementMetrics() (int, error) { o := obfuscate.NewObfuscator(obfuscate.Config{SQL: c.config.ObfuscatorOptions}) defer o.Stop() var diff OracleRowMonotonicCount + planErrors = 0 FQTSent := make(map[string]int) + executionPlanSent := make(map[uint64]int) for _, statementMetricRow := range statementMetricsAll { newCache[statementMetricRow.StatementMetricsKeyDB] = statementMetricRow.StatementMetricsMonotonicCountDB previousMonotonic, exists := c.statementMetricsMonotonicCountsPrevious[statementMetricRow.StatementMetricsKeyDB] @@ -483,12 +648,14 @@ func (c *Check) StatementMetrics() (int, error) { if err != nil { log.Errorf("query metrics statements error named exec %s %s %+v", err, SQLTextQuery, p) SQLTextErrors++ + if rows != nil { + rows.Close() + } continue } - defer rows.Close() - rows.Next() cols, err := rows.SliceScan() + rows.Close() totalSQLTextTimeUs += time.Since(startSQLText).Microseconds() if err != nil { @@ -497,14 +664,12 @@ func (c *Check) StatementMetrics() (int, error) { continue } SQLStatement = cols[0].(string) - obfuscatedStatement, err := c.GetObfuscatedStatement(o, SQLStatement) SQLStatement = obfuscatedStatement.Statement if err == nil { queryRow.QuerySignature = obfuscatedStatement.QuerySignature queryRow.Commands = obfuscatedStatement.Commands queryRow.Tables = obfuscatedStatement.Tables - if c.config.InstanceConfig.QueryMetrics.IncludeDatadogQueries { cacheEntry := StatementsCacheData{ statement: SQLStatement, @@ -561,6 +726,178 @@ func (c *Check) StatementMetrics() (int, error) { sender.EventPlatformEvent(FQTPayloadBytes, "dbm-samples") FQTSent[queryRow.QuerySignature] = 1 } + + if c.config.ExecutionPlans.Enabled { + _, ok = executionPlanSent[statementMetricRow.PlanHashValue] + if !ok { + var planStepsPayload []PlanDefinition + var planStepsDB []PlanRows + var oraclePlan OraclePlan + err = c.db.Select(&planStepsDB, PLAN_QUERY, 
statementMetricRow.PlanHashValue) + + if err == nil { + for _, stepRow := range planStepsDB { + var stepPayload PlanDefinition + if stepRow.Operation.Valid { + stepPayload.Operation = stepRow.Operation.String + } + if stepRow.Options.Valid { + stepPayload.Options = stepRow.Options.String + } + if stepRow.ObjectOwner.Valid { + stepPayload.ObjectOwner = stepRow.ObjectOwner.String + } + if stepRow.ObjectName.Valid { + stepPayload.ObjectName = stepRow.ObjectName.String + } + if stepRow.ObjectAlias.Valid { + stepPayload.ObjectAlias = stepRow.ObjectAlias.String + } + if stepRow.ObjectType.Valid { + stepPayload.ObjectType = stepRow.ObjectType.String + } + if stepRow.PlanStepId.Valid { + stepPayload.PlanStepId = stepRow.PlanStepId.Int64 + } + if stepRow.ParentId.Valid { + stepPayload.ParentId = stepRow.ParentId.Int64 + } + if stepRow.Depth.Valid { + stepPayload.Depth = stepRow.Depth.Int64 + } + if stepRow.Position.Valid { + stepPayload.Position = stepRow.Position.Int64 + } + if stepRow.SearchColumns.Valid { + stepPayload.SearchColumns = stepRow.SearchColumns.Int64 + } + if stepRow.Cost.Valid { + stepPayload.Cost = stepRow.Cost.Float64 + } + if stepRow.Cardinality.Valid { + stepPayload.Cardinality = stepRow.Cardinality.Float64 + } + if stepRow.Bytes.Valid { + stepPayload.Bytes = stepRow.Bytes.Float64 + } + if stepRow.PartitionStart.Valid { + stepPayload.PartitionStart = stepRow.PartitionStart.String + } + if stepRow.PartitionStop.Valid { + stepPayload.PartitionStop = stepRow.PartitionStop.String + } + if stepRow.CPUCost.Valid { + stepPayload.CPUCost = stepRow.CPUCost.Float64 + } + if stepRow.IOCost.Valid { + stepPayload.IOCost = stepRow.IOCost.Float64 + } + if stepRow.TempSpace.Valid { + stepPayload.TempSpace = stepRow.TempSpace.Float64 + } + if stepRow.AccessPredicates.Valid { + obfuscated, err := o.ObfuscateSQLString(stepRow.AccessPredicates.String) + if err == nil { + stepPayload.AccessPredicates = obfuscated.Query + } else { + stepPayload.AccessPredicates = "obfuscation 
error" + log.Errorf("Access obfuscation error") + } + } + if stepRow.FilterPredicates.Valid { + obfuscated, err := o.ObfuscateSQLString(stepRow.FilterPredicates.String) + if err == nil { + stepPayload.FilterPredicates = obfuscated.Query + } else { + stepPayload.FilterPredicates = "obfuscation error" + log.Errorf("Filter obfuscation error") + } + } + if stepRow.Projection.Valid { + stepPayload.Projection = stepRow.Projection.String + } + if stepRow.LastStarts != nil { + stepPayload.LastStarts = *stepRow.LastStarts + } + if stepRow.LastOutputRows != nil { + stepPayload.LastOutputRows = *stepRow.LastOutputRows + } + if stepRow.LastCRBufferGets != nil { + stepPayload.LastCRBufferGets = *stepRow.LastCRBufferGets + } + if stepRow.LastDiskReads != nil { + stepPayload.LastDiskReads = *stepRow.LastDiskReads + } + if stepRow.LastDiskWrites != nil { + stepPayload.LastDiskWrites = *stepRow.LastDiskWrites + } + if stepRow.LastElapsedTime != nil { + stepPayload.LastElapsedTime = *stepRow.LastElapsedTime + } + if stepRow.LastMemoryUsed != nil { + stepPayload.LastMemoryUsed = *stepRow.LastMemoryUsed + } + if stepRow.LastDegree != nil { + stepPayload.LastDegree = *stepRow.LastDegree + } + if stepRow.LastTempsegSize != nil { + stepPayload.LastTempsegSize = *stepRow.LastTempsegSize + } + if stepRow.PlanCreated.Valid && stepRow.PlanCreated.String != "" { + oraclePlan.Timestamp = stepRow.PlanCreated.String + } + if stepRow.OptimizerMode.Valid && stepRow.OptimizerMode.String != "" { + oraclePlan.OptimizerMode = stepRow.OptimizerMode.String + } + if stepRow.Other.Valid && stepRow.Other.String != "" { + oraclePlan.Other = stepRow.Other.String + } + if stepRow.PDBName.Valid && stepRow.PDBName.String != "" { + oraclePlan.PDBName = stepRow.PDBName.String + } + oraclePlan.SQLID = stepRow.SQLID + + planStepsPayload = append(planStepsPayload, stepPayload) + } + oraclePlan.PlanHashValue = statementMetricRow.PlanHashValue + planStatementMetadata := PlanStatementMetadata{ + Tables: 
queryRow.Tables, + Commands: queryRow.Commands, + } + planPlanDB := PlanPlanDB{ + Definition: planStepsPayload, + Signature: strconv.FormatUint(statementMetricRow.PlanHashValue, 10), + } + planDB := PlanDB{ + Instance: c.cdbName, + Plan: planPlanDB, + QuerySignature: queryRow.QuerySignature, + Statement: SQLStatement, + Metadata: planStatementMetadata, + } + planPayload := PlanPayload{ + Timestamp: float64(time.Now().UnixMilli()), + Host: c.dbHostname, + AgentVersion: c.agentVersion, + Source: common.IntegrationName, + Tags: strings.Join(c.tags, ","), + DBMType: "plan", + PlanDB: planDB, + OraclePlan: oraclePlan, + } + planPayloadBytes, err := json.Marshal(planPayload) + if err != nil { + log.Errorf("Error marshalling plan payload: %s", err) + } + + sender.EventPlatformEvent(planPayloadBytes, "dbm-samples") + log.Tracef("Plan payload %+v", string(planPayloadBytes)) + } else { + planErrors++ + log.Errorf("failed getting execution plan %s for plan_hash_value: %d", err, statementMetricRow.PlanHashValue) + } + } + } } c.copyToPreviousMap(newCache) @@ -608,6 +945,9 @@ func (c *Check) StatementMetrics() (int, error) { sender.Gauge("dd.oracle.statements_metrics.sql_text_errors", float64(SQLTextErrors), "", c.tags) sender.Gauge("dd.oracle.statements_metrics.time_ms", float64(time.Since(start).Milliseconds()), "", c.tags) sender.Gauge("dd.oracle.statements.sqltext.time_ms", math.Round(float64(totalSQLTextTimeUs/1000)), "", c.tags) + if c.config.ExecutionPlans.Enabled { + sender.Gauge("dd.oracle.plan_errors.count", float64(planErrors), "", c.tags) + } sender.Commit() c.statementsFilter.SQLIDs = nil @@ -617,5 +957,8 @@ func (c *Check) StatementMetrics() (int, error) { c.statementsLastRun = start + if SQLTextErrors > 0 || planErrors > 0 { + return SQLCount, fmt.Errorf("SQL statements processed: %d, text errors: %d, plan erros: %d", SQLCount, SQLTextErrors, planErrors) + } return SQLCount, nil } diff --git a/pkg/collector/corechecks/oracle-dbm/statements_test.go 
b/pkg/collector/corechecks/oracle-dbm/statements_test.go index 08c7861c0e8f6..d1f6f107e1d97 100644 --- a/pkg/collector/corechecks/oracle-dbm/statements_test.go +++ b/pkg/collector/corechecks/oracle-dbm/statements_test.go @@ -12,14 +12,12 @@ import ( "log" "testing" - //"github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/oracle-dbm/common" "github.com/jmoiron/sqlx" "github.com/stretchr/testify/assert" ) func TestUInt64Binding(t *testing.T) { - //aggregator.InitAndStartAgentDemultiplexer(demuxOpts(), "") initAndStartAgentDemultiplexer() chk.dbmEnabled = true @@ -69,7 +67,6 @@ func TestUInt64Binding(t *testing.T) { assert.NoError(t, err, "query metrics with %s driver", driver) assert.Equal(t, 1, n, "total query metrics captured with %s driver", driver) - //slice := []any{uint64(17202440635181618732)} slice := []any{"17202440635181618732"} var retValue int err = chk.db.Get(&retValue, "SELECT COUNT(*) FROM v$sqlstats WHERE force_matching_signature IN (:1)", slice...) 
@@ -101,3 +98,19 @@ func TestUInt64Binding(t *testing.T) { } } } + +func TestStatementMetrics(t *testing.T) { + initAndStartAgentDemultiplexer() + chk.config.QueryMetrics.IncludeDatadogQueries = true + var retValue int + for i := 1; i <= 2; i++ { + err := chk.db.Get(&retValue, "SELECT /* DD */ 1 FROM dual") + if err != nil { + log.Fatalf("row error %s", err) + return + } + chk.SampleSession() + _, err = chk.StatementMetrics() + assert.NoErrorf(t, err, "statement metrics check failed") + } +} diff --git a/pkg/collector/corechecks/sbom/check.go b/pkg/collector/corechecks/sbom/check.go index 8557011ccb329..afb0ad3649a58 100644 --- a/pkg/collector/corechecks/sbom/check.go +++ b/pkg/collector/corechecks/sbom/check.go @@ -8,6 +8,7 @@ package sbom import ( + "errors" "time" yaml "gopkg.in/yaml.v2" @@ -15,6 +16,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" + ddConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/workloadmeta" ) @@ -112,6 +114,10 @@ func CheckFactory() check.Check { // Configure parses the check configuration and initializes the sbom check func (c *Check) Configure(integrationConfigDigest uint64, config, initConfig integration.Data, source string) error { + if !ddConfig.Datadog.GetBool("sbom.enabled") { + return errors.New("collection of SBOM is disabled") + } + if err := c.CommonConfigure(integrationConfigDigest, initConfig, config, source); err != nil { return err } @@ -125,7 +131,7 @@ func (c *Check) Configure(integrationConfigDigest uint64, config, initConfig int return err } - c.processor, err = newProcessor(c.workloadmetaStore, sender, c.instance.ChunkSize, time.Duration(c.instance.NewSBOMMaxLatencySeconds)*time.Second, c.instance.HostSBOM) + c.processor, err = newProcessor(c.workloadmetaStore, sender, 
c.instance.ChunkSize, time.Duration(c.instance.NewSBOMMaxLatencySeconds)*time.Second, ddConfig.Datadog.GetBool("sbom.host.enabled")) if err != nil { return err } diff --git a/pkg/collector/corechecks/sbom/processor_test.go b/pkg/collector/corechecks/sbom/processor_test.go index 9fc70046dd245..fd38b9ec0f615 100644 --- a/pkg/collector/corechecks/sbom/processor_test.go +++ b/pkg/collector/corechecks/sbom/processor_test.go @@ -399,7 +399,7 @@ func TestProcessEvents(t *testing.T) { assert.Nil(t, err) defer os.RemoveAll(cacheDir) cfg.Set("sbom.cache_directory", cacheDir) - cfg.Set("container_image_collection.sbom.enabled", true) + cfg.Set("sbom.container_image.enabled", true) _, err = sbomscanner.CreateGlobalScanner(cfg) assert.Nil(t, err) diff --git a/pkg/collector/corechecks/snmp/snmp_test.go b/pkg/collector/corechecks/snmp/snmp_test.go index 93d03b8e0c189..aa7ecd0527b9b 100644 --- a/pkg/collector/corechecks/snmp/snmp_test.go +++ b/pkg/collector/corechecks/snmp/snmp_test.go @@ -991,10 +991,10 @@ namespace: nsSubnet err = checkSubnet.Configure(integration.FakeConfigHash, rawInstanceConfigSubnet, []byte(``), "test") assert.Nil(t, err) - assert.Equal(t, check.ID("snmp:default:1.1.1.1:d03f28dacffc6886"), check1.ID()) - assert.Equal(t, check.ID("snmp:default:2.2.2.2:b757d26210a16a9e"), check2.ID()) - assert.Equal(t, check.ID("snmp:ns3:3.3.3.3:cc7fb36641d79afd"), check3.ID()) - assert.Equal(t, check.ID("snmp:nsSubnet:10.10.10.0/24:6b68b30a87454899"), checkSubnet.ID()) + assert.Equal(t, check.ID("snmp:default:1.1.1.1:9d3f14dbaceba72d"), check1.ID()) + assert.Equal(t, check.ID("snmp:default:2.2.2.2:9c51b342e7a4fdd5"), check2.ID()) + assert.Equal(t, check.ID("snmp:ns3:3.3.3.3:7e1c698677986eca"), check3.ID()) + assert.Equal(t, check.ID("snmp:nsSubnet:10.10.10.0/24:ae80a9e88fe6643e"), checkSubnet.ID()) assert.NotEqual(t, check1.ID(), check2.ID()) } diff --git a/pkg/collector/corechecks/system/cpu/cpu_windows.go b/pkg/collector/corechecks/system/cpu/cpu_windows.go index 
83b484c1076b9..19f40bf035ac5 100644 --- a/pkg/collector/corechecks/system/cpu/cpu_windows.go +++ b/pkg/collector/corechecks/system/cpu/cpu_windows.go @@ -15,7 +15,7 @@ import ( "fmt" "strconv" - "github.com/DataDog/gohai/cpu" + "github.com/DataDog/datadog-agent/pkg/gohai/cpu" "github.com/DataDog/datadog-agent/pkg/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/collector/check" diff --git a/pkg/collector/python/check.go b/pkg/collector/python/check.go index 66be71d472c5e..d070650a00921 100644 --- a/pkg/collector/python/check.go +++ b/pkg/collector/python/check.go @@ -303,6 +303,8 @@ func (c *PythonCheck) Configure(integrationConfigDigest uint64, data integration s.FinalizeCheckServiceTag() } + s.SetNoIndex(commonOptions.NoIndex) + c.initConfig = string(initConfig) c.instanceConfig = string(data) diff --git a/pkg/compliance/agent.go b/pkg/compliance/agent.go index 1f5155b5a1134..2a02c1cbb141e 100644 --- a/pkg/compliance/agent.go +++ b/pkg/compliance/agent.go @@ -7,7 +7,6 @@ package compliance import ( "context" - "encoding/json" "expvar" "fmt" "hash/fnv" @@ -15,6 +14,8 @@ import ( "sync" "time" + "github.com/DataDog/datadog-agent/pkg/compliance/aptconfig" + "github.com/DataDog/datadog-agent/pkg/compliance/k8sconfig" "github.com/DataDog/datadog-agent/pkg/compliance/metrics" "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/security/common" @@ -38,7 +39,7 @@ type AgentOptions struct { // Reporter is the output interface of the events that are gathered by the // agent. - Reporter Reporter + Reporter *LogReporter // RuleFilter allow specifying a global rule filtering that will be // applied on all loaded benchmarks. 
@@ -171,6 +172,18 @@ func (a *Agent) Start() error { wg.Done() }() + wg.Add(1) + go func() { + a.runKubernetesConfigurationsExport(ctx) + wg.Done() + }() + + wg.Add(1) + go func() { + a.runAptConfigurationExport(ctx) + wg.Done() + }() + go func() { <-ctx.Done() wg.Wait() @@ -264,7 +277,7 @@ func (a *Agent) runXCCDFBenchmarks(ctx context.Context) { return } for _, rule := range benchmark.Rules { - events := EvaluateXCCDFRule(ctx, a.opts.Hostname, benchmark, rule) + events := EvaluateXCCDFRule(ctx, a.opts.Hostname, a.opts.StatsdClient, benchmark, rule) a.reportEvents(ctx, benchmark, events) if sleepAborted(ctx, throttler.C) { return @@ -277,19 +290,64 @@ func (a *Agent) runXCCDFBenchmarks(ctx context.Context) { } } +func (a *Agent) runKubernetesConfigurationsExport(ctx context.Context) { + if !config.IsKubernetes() { + return + } + + runTicker := time.NewTicker(a.opts.CheckInterval) + defer runTicker.Stop() + + for i := 0; ; i++ { + seed := fmt.Sprintf("%s%s%d", a.opts.Hostname, "kubernetes-configuration", i) + jitter := randomJitter(seed, a.opts.RunJitterMax) + if sleepAborted(ctx, time.After(jitter)) { + return + } + k8sResourceType, k8sResourceData := k8sconfig.LoadConfiguration(ctx, a.opts.HostRoot) + k8sResourceLog := NewResourceLog(a.opts.Hostname, k8sResourceType, k8sResourceData) + a.opts.Reporter.ReportEvent(k8sResourceLog) + if sleepAborted(ctx, runTicker.C) { + return + } + } +} + +func (a *Agent) runAptConfigurationExport(ctx context.Context) { + ruleFilterModel := module.NewRuleFilterModel() + seclRuleFilter := rules.NewSECLRuleFilter(ruleFilterModel) + accepted, err := seclRuleFilter.IsRuleAccepted(&rules.RuleDefinition{ + Filters: []string{aptconfig.SeclFilter}, + }) + if !accepted || err != nil { + return + } + + runTicker := time.NewTicker(a.opts.CheckInterval) + defer runTicker.Stop() + + for i := 0; ; i++ { + seed := fmt.Sprintf("%s%s%d", a.opts.Hostname, "apt-configuration", i) + jitter := randomJitter(seed, a.opts.RunJitterMax) + if 
sleepAborted(ctx, time.After(jitter)) { + return + } + aptResourceType, aptResourceData := aptconfig.LoadConfiguration(ctx, a.opts.HostRoot) + aptResourceLog := NewResourceLog(a.opts.Hostname, aptResourceType, aptResourceData) + a.opts.Reporter.ReportEvent(aptResourceLog) + if sleepAborted(ctx, runTicker.C) { + return + } + } +} + func (a *Agent) reportEvents(ctx context.Context, benchmark *Benchmark, events []*CheckEvent) { for _, event := range events { a.updateEvent(event) if event.Result == CheckSkipped { continue } - buf, err := json.Marshal(event) - if err != nil { - log.Errorf("failed to serialize event from benchmark=%s rule=%s: %v", benchmark.FrameworkID, event.RuleID, err) - } else { - log.Tracef("received event from benchmark=%s rule=%s: %s", benchmark.FrameworkID, event.RuleID, buf) - a.opts.Reporter.ReportRaw(buf, "") - } + a.opts.Reporter.ReportEvent(event) } } diff --git a/pkg/compliance/aptconfig/aptconfig.go b/pkg/compliance/aptconfig/aptconfig.go new file mode 100644 index 0000000000000..5515537f8368b --- /dev/null +++ b/pkg/compliance/aptconfig/aptconfig.go @@ -0,0 +1,297 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package aptconfig + +import ( + "context" + "io" + "io/fs" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "unicode" + + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// SeclFilter only selects ubuntu hosts for now on which we want to test out +// and apply unattended upgrades checks. 
+const SeclFilter = `os.id == "ubuntu"` + +const ( + resourceType = "host_apt_config" + + aptConfFile = "/etc/apt/apt.conf" + aptConfFragmentsDir = "/etc/apt/apt.conf.d" + systemdConfDir = "/etc/systemd/system" +) + +// LoadConfiguration exports the aggregated APT configuration file and parts +// of the systemd configuration files related to APT timers. +func LoadConfiguration(ctx context.Context, hostroot string) (string, interface{}) { + defer func() { + if err := recover(); err != nil { + log.Warnf("could not parse APT configuration properly: %v", err) + } + }() + + aptConfDir := filepath.Join(hostroot, aptConfFragmentsDir) + aptConfFiles, _ := filepath.Glob(filepath.Join(aptConfDir, "*")) + sort.Strings(aptConfFiles) + aptConfFiles = append([]string{filepath.Join(hostroot, aptConfFile)}, aptConfFiles...) + + aptConfs := make(map[string]interface{}) + for _, path := range aptConfFiles { + data, err := readFileLimit(path) + if err == nil { + conf := parseAPTConfiguration(data) + for k, v := range conf { + aptConfs[k] = v + } + } + } + + systemdConfDir := filepath.Join(hostroot, systemdConfDir) + systemdTimersConfs := make(map[string]interface{}) + var systemdConfFiles []string + _ = filepath.Walk(systemdConfDir, func(path string, info fs.FileInfo, err error) error { + if err != nil || info.IsDir() { + return err + } + base := filepath.Base(path) + if base == "apt-daily-upgrade.timer" || base == "apt-daily.timer" { + systemdConfFiles = append(systemdConfFiles, path) + } + return nil + }) + sort.Strings(systemdConfFiles) + + for _, path := range systemdConfFiles { + data, err := readFileLimit(path) + if err == nil { + base := filepath.Base(path) + conf := parseSystemdConf(data) + systemdTimersConfs[base] = conf + } + } + + resourceData := map[string]interface{}{ + "apt": aptConfs, + "systemd": map[string]interface{}{ + "timers": systemdTimersConfs, + }, + } + + return resourceType, resourceData +} + +type tokenType int + +const ( + eos tokenType = iota + literal + 
blockStart + blockEnd + data + equal + comment + comma + parseError +) + +type token struct { + kind tokenType + value string +} + +func parseAPTConfiguration(str string) map[string]interface{} { + conf := make(map[string]interface{}) + var cursor []string + var key string +loop: + for { + var tok token + str, tok = nextTokenAPT(str) + switch tok.kind { + case blockStart: + cursor = append(cursor, key) + case blockEnd: + if len(cursor) > 0 { + cursor = cursor[:len(cursor)-1] + } + key = "" + case literal: + key = strings.Join(append(cursor, tok.value), "::") + case data: + if key != "" { + if v, ok := conf[key]; ok { + if a, ok := v.([]string); ok { + conf[key] = append(a, tok.value) + } else if s, ok := v.(string); ok { + conf[key] = append([]string{s}, tok.value) + } + } else { + conf[key] = tok.value + } + } + case comment, comma: + case eos: + break loop + default: + break loop + } + } + return conf +} + +// man apt.conf.5: https://manpages.ubuntu.com/manpages/trusty/man5/apt.conf.5.html +// +// > Syntactically the configuration language is modeled after what the ISC +// > tools such as bind and dhcp use. Lines starting with // are treated as +// > comments (ignored), as well as all text between /* and */, just like C/C++ +// > comments. Each line is of the form APT::Get::Assume-Yes "true";. The +// > quotation marks and trailing semicolon are required. The value must be on +// > one line, and there is no kind of string concatenation. Values must not +// > include backslashes or extra quotation marks. Option names are made up of +// > alphanumeric characters and the characters "/-:._+". 
A new scope can be +// > opened with curly braces, like this: +func nextTokenAPT(str string) (string, token) { + str = eatWhitespace(str) + if len(str) == 0 { + return "", token{kind: eos} + } + var t token + c := str[0] + i := 0 + switch { + case c == '/' && strings.HasPrefix(str, "/*"): + t.kind = comment + i = 2 + for _, r := range str[2:] { + i++ + if r == '*' && strings.HasPrefix(str[i:], "*/") { + i++ + break + } + } + case c == '/' && strings.HasPrefix(str, "//"): + t.kind = comment + i = 2 + for _, r := range str[2:] { + i++ + if r == '\n' { + break + } + } + case c == '#': + t.kind = comment + i = 1 + for _, r := range str[1:] { + i++ + if r == '\n' { + break + } + } + case (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'): + t.kind = literal + i = 1 + for _, r := range str[1:] { + if !isLiteral(r) { + break + } + i++ + } + case c == '{': + t.kind = blockStart + i = 1 + case c == ';': + t.kind = comma + i = 1 + case c == '}': + t.kind = blockEnd + i = 1 + case c == '"': + ok := false + t.kind = data + i = 1 + for _, r := range str[1:] { + i++ + if r == '"' { + if value, err := strconv.Unquote(str[:i]); err == nil { + ok = true + t.value = value + break + } + } + } + if !ok { + t.kind = parseError + } + } + if i == 0 { + t.kind = parseError + } else if t.kind != data { + t.value = str[:i] + } + return str[i:], t +} + +func isLiteral(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + (r == '/') || (r == '-') || (r == ':') || (r == '.') || (r == '_') || (r == '+') +} + +func eatWhitespace(str string) string { + i := 0 + for _, r := range str { + if !unicode.IsSpace(r) { + break + } + i++ + } + return str[i:] +} + +func readFileLimit(path string) (string, error) { + const maxSize = 64 * 1024 + f, err := os.Open(path) + if err != nil { + return "", err + } + data, err := ioutil.ReadAll(io.LimitReader(f, maxSize)) + if err != nil { + return "", err + } + return string(data), nil +} + +// systemd 
configuration syntax: +// https://www.freedesktop.org/software/systemd/man/systemd.syntax.html +func parseSystemdConf(str string) map[string]string { + lines := strings.Split(str, "\n") + conf := make(map[string]string) + var section = "" + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + if strings.HasPrefix(line, "[") { + section = strings.Replace(line[1:], "]", "", 1) + } else if section != "" { + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 { + conf[section+"/"+parts[0]] = parts[1] + } + } + } + return conf +} diff --git a/pkg/compliance/aptconfig/aptconfig_test.go b/pkg/compliance/aptconfig/aptconfig_test.go new file mode 100644 index 0000000000000..b6dce70ecfafa --- /dev/null +++ b/pkg/compliance/aptconfig/aptconfig_test.go @@ -0,0 +1,105 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package aptconfig + +import ( + "io" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAptConfigParser(t *testing.T) { + f, err := os.Open("testdata/apt.conf") + if err != nil { + t.Fatal(err) + } + data, err := io.ReadAll(f) + if err != nil { + t.Fatal(err) + } + conf := parseAPTConfiguration(string(data)) + expected := map[string]interface{}{ + "APT::Move-Autobit-Sections": []string{ + "oldlibs", + "contrib/oldlibs", + "non-free/oldlibs", + "restricted/oldlibs", + "universe/oldlibs", + "multiverse/oldlibs", + }, + "APT::Never-MarkAuto-Sections": []string{ + "metapackages", + "contrib/metapackages", + "non-free/metapackages", + "restricted/metapackages", + "universe/metapackages", + "multiverse/metapackages", + }, + "APT::NeverAutoRemove": []string{ + "^firmware-linux.*", + "^linux-firmware$", + "^linux-image-[a-z0-9]*$", + "^linux-image-[a-z0-9]*-[a-z0-9]*$", + }, + "APT::Periodic::Enable": "0", + "APT::Periodic::Unattended-Upgrade": "1", + "APT::Periodic::Update-Package-Lists": "1", + "APT::Update::Post-Invoke": "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true", + "APT::Update::Post-Invoke-Success": "/usr/bin/test -e /usr/share/dbus-1/system-services/org.freedesktop.PackageKit.service \u0026\u0026 /usr/bin/test -S /var/run/dbus/system_bus_socket \u0026\u0026 /usr/bin/gdbus call --system --dest org.freedesktop.PackageKit --object-path /org/freedesktop/PackageKit --timeout 4 --method org.freedesktop.PackageKit.StateHasChanged cache-update \u003e /dev/null; /bin/echo \u003e /dev/null", + "APT::VersionedKernelPackages": []string{ + "linux-.*", + "kfreebsd-.*", + "gnumach-.*", + ".*-modules", + ".*-kernel", + }, + "Acquire::Changelogs::AlwaysOnline": "true", + "Acquire::CompressionTypes::Order::": "gz", + "Acquire::GzipIndexes": "true", + "Acquire::Languages": "none", + "Acquire::http::User-Agent-Non-Interactive": "true", + "Apt::AutoRemove::SuggestsImportant": "false", + 
"DPkg::Post-Invoke": []string{ + "/usr/bin/test -e /usr/share/dbus-1/system-services/org.freedesktop.PackageKit.service \u0026\u0026 /usr/bin/test -S /var/run/dbus/system_bus_socket \u0026\u0026 /usr/bin/gdbus call --system --dest org.freedesktop.PackageKit --object-path /org/freedesktop/PackageKit --timeout 4 --method org.freedesktop.PackageKit.StateHasChanged cache-update \u003e /dev/null; /bin/echo \u003e /dev/null", + "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true", + }, + "DPkg::Pre-Install-Pkgs": "/usr/sbin/dpkg-preconfigure --apt || true", + "Dir::Cache::pkgcache": "", + "Dir::Cache::srcpkgcache": "", + "Unattended-Upgrade::Allowed-Origins": []string{ + "${distro_id}:${distro_codename}", + "${distro_id}:${distro_codename}-security", + "${distro_id}ESMApps:${distro_codename}-apps-security", + "${distro_id}ESM:${distro_codename}-infra-security", + }, + "Unattended-Upgrade::DevRelease": "auto", + } + assert.Equal(t, expected, conf) +} + +func TestSystemdConfigParser(t *testing.T) { + f, err := os.Open("testdata/apt-daily.timer") + if err != nil { + t.Fatal(err) + } + data, err := io.ReadAll(f) + if err != nil { + t.Fatal(err) + } + conf := parseSystemdConf(string(data)) + expected := map[string]string{ + "Install/WantedBy": "timers.target", + "Unit/Description": "Message of the Day", + "Timer/OnCalendar": "00,12:00:00", + "Timer/RandomizedDelaySec": "12h", + "Timer/Persistent": "true", + "Timer/OnStartupSec": "1min", + } + assert.Equal(t, expected, conf) + +} diff --git a/pkg/compliance/aptconfig/testdata/apt-daily.timer b/pkg/compliance/aptconfig/testdata/apt-daily.timer new file mode 100644 index 0000000000000..8d3930c21f6ec --- /dev/null +++ b/pkg/compliance/aptconfig/testdata/apt-daily.timer @@ -0,0 +1,13 @@ +[Install] +WantedBy=timers.target +[Unit] +Description=Message of the Day + +[Timer] +OnCalendar=00,12:00:00 +RandomizedDelaySec=12h +Persistent=true +OnStartupSec=1min + +[Install] 
+WantedBy=timers.target diff --git a/pkg/compliance/aptconfig/testdata/apt.conf b/pkg/compliance/aptconfig/testdata/apt.conf new file mode 100644 index 0000000000000..3a4d1699942e3 --- /dev/null +++ b/pkg/compliance/aptconfig/testdata/apt.conf @@ -0,0 +1,211 @@ +Acquire::Changelogs::AlwaysOnline "true"; +Acquire::http::User-Agent-Non-Interactive "true"; +APT +{ + NeverAutoRemove + { + "^firmware-linux.*"; + "^linux-firmware$"; + "^linux-image-[a-z0-9]*$"; + "^linux-image-[a-z0-9]*-[a-z0-9]*$"; + }; + + VersionedKernelPackages + { + # kernels + "linux-.*"; + "kfreebsd-.*"; + "gnumach-.*"; + # (out-of-tree) modules + ".*-modules"; + ".*-kernel"; + }; + + Never-MarkAuto-Sections + { + "metapackages"; + "contrib/metapackages"; + "non-free/metapackages"; + "restricted/metapackages"; + "universe/metapackages"; + "multiverse/metapackages"; + }; + + Move-Autobit-Sections + { + "oldlibs"; + "contrib/oldlibs"; + "non-free/oldlibs"; + "restricted/oldlibs"; + "universe/oldlibs"; + "multiverse/oldlibs"; + }; +}; +APT::Periodic::Update-Package-Lists "1"; +APT::Periodic::Unattended-Upgrade "1"; +// THIS FILE IS USED TO INFORM PACKAGEKIT +// THAT THE UPDATE-INFO MIGHT HAVE CHANGED + +// Whenever dpkg is called we might have different updates +// i.e. if an user removes a package that had an update +DPkg::Post-Invoke { +"/usr/bin/test -e /usr/share/dbus-1/system-services/org.freedesktop.PackageKit.service && /usr/bin/test -S /var/run/dbus/system_bus_socket && /usr/bin/gdbus call --system --dest org.freedesktop.PackageKit --object-path /org/freedesktop/PackageKit --timeout 4 --method org.freedesktop.PackageKit.StateHasChanged cache-update > /dev/null; /bin/echo > /dev/null"; +}; + +// When Apt's cache is updated (i.e. 
apt-cache update) +APT::Update::Post-Invoke-Success { +"/usr/bin/test -e /usr/share/dbus-1/system-services/org.freedesktop.PackageKit.service && /usr/bin/test -S /var/run/dbus/system_bus_socket && /usr/bin/gdbus call --system --dest org.freedesktop.PackageKit --object-path /org/freedesktop/PackageKit --timeout 4 --method org.freedesktop.PackageKit.StateHasChanged cache-update > /dev/null; /bin/echo > /dev/null"; +}; +// Automatically upgrade packages from these (origin:archive) pairs +// +// Note that in Ubuntu security updates may pull in new dependencies +// from non-security sources (e.g. chromium). By allowing the release +// pocket these get automatically pulled in. +Unattended-Upgrade::Allowed-Origins { + "${distro_id}:${distro_codename}"; + "${distro_id}:${distro_codename}-security"; + // Extended Security Maintenance; doesn't necessarily exist for + // every release and this system may not have it installed, but if + // available, the policy for updates is such that unattended-upgrades + // should also install from here by default. + "${distro_id}ESMApps:${distro_codename}-apps-security"; + "${distro_id}ESM:${distro_codename}-infra-security"; +// "${distro_id}:${distro_codename}-updates"; +// "${distro_id}:${distro_codename}-proposed"; +// "${distro_id}:${distro_codename}-backports"; +}; + +// Python regular expressions, matching packages to exclude from upgrading +Unattended-Upgrade::Package-Blacklist { + // The following matches all packages starting with linux- +// "linux-"; + + // Use $ to explicitely define the end of a package name. Without + // the $, "libc6" would match all of them. 
+// "libc6$"; +// "libc6-dev$"; +// "libc6-i686$"; + + // Special characters need escaping +// "libstdc\+\+6$"; + + // The following matches packages like xen-system-amd64, xen-utils-4.1, + // xenstore-utils and libxenstore3.0 +// "(lib)?xen(store)?"; + + // For more information about Python regular expressions, see + // https://docs.python.org/3/howto/regex.html +}; + +// This option controls whether the development release of Ubuntu will be +// upgraded automatically. Valid values are "true", "false", and "auto". +Unattended-Upgrade::DevRelease "auto"; + +// This option allows you to control if on a unclean dpkg exit +// unattended-upgrades will automatically run +// dpkg --force-confold --configure -a +// The default is true, to ensure updates keep getting installed +//Unattended-Upgrade::AutoFixInterruptedDpkg "true"; + +// Split the upgrade into the smallest possible chunks so that +// they can be interrupted with SIGTERM. This makes the upgrade +// a bit slower but it has the benefit that shutdown while a upgrade +// is running is possible (with a small delay) +//Unattended-Upgrade::MinimalSteps "true"; + +// Install all updates when the machine is shutting down +// instead of doing it in the background while the machine is running. +// This will (obviously) make shutdown slower. +// Unattended-upgrades increases logind's InhibitDelayMaxSec to 30s. +// This allows more time for unattended-upgrades to shut down gracefully +// or even install a few packages in InstallOnShutdown mode, but is still a +// big step back from the 30 minutes allowed for InstallOnShutdown previously. +// Users enabling InstallOnShutdown mode are advised to increase +// InhibitDelayMaxSec even further, possibly to 30 minutes. +//Unattended-Upgrade::InstallOnShutdown "false"; + +// Send email to this address for problems or packages upgrades +// If empty or unset then no email is sent, make sure that you +// have a working mail setup on your system. 
A package that provides +// 'mailx' must be installed. E.g. "user@example.com" +//Unattended-Upgrade::Mail ""; + +// Set this value to one of: +// "always", "only-on-error" or "on-change" +// If this is not set, then any legacy MailOnlyOnError (boolean) value +// is used to chose between "only-on-error" and "on-change" +//Unattended-Upgrade::MailReport "on-change"; + +// Remove unused automatically installed kernel-related packages +// (kernel images, kernel headers and kernel version locked tools). +//Unattended-Upgrade::Remove-Unused-Kernel-Packages "true"; + +// Do automatic removal of newly unused dependencies after the upgrade +//Unattended-Upgrade::Remove-New-Unused-Dependencies "true"; + +// Do automatic removal of unused packages after the upgrade +// (equivalent to apt-get autoremove) +//Unattended-Upgrade::Remove-Unused-Dependencies "false"; + +// Automatically reboot *WITHOUT CONFIRMATION* if +// the file /var/run/reboot-required is found after the upgrade +//Unattended-Upgrade::Automatic-Reboot "false"; + +// Automatically reboot even if there are users currently logged in +// when Unattended-Upgrade::Automatic-Reboot is set to true +//Unattended-Upgrade::Automatic-Reboot-WithUsers "true"; + +// If automatic reboot is enabled and needed, reboot at the specific +// time instead of immediately +// Default: "now" +//Unattended-Upgrade::Automatic-Reboot-Time "02:00"; + +// Use apt bandwidth limit feature, this example limits the download +// speed to 70kb/sec +//Acquire::http::Dl-Limit "70"; + +// Enable logging to syslog. Default is False +// Unattended-Upgrade::SyslogEnable "false"; + +// Specify syslog facility. Default is daemon +// Unattended-Upgrade::SyslogFacility "daemon"; + +// Download and install upgrades only on AC power +// (i.e. skip or gracefully stop updates on battery) +// Unattended-Upgrade::OnlyOnACPower "true"; + +// Download and install upgrades only on non-metered connection +// (i.e. 
skip or gracefully stop updates on a metered connection) +// Unattended-Upgrade::Skip-Updates-On-Metered-Connections "true"; + +// Verbose logging +// Unattended-Upgrade::Verbose "false"; + +// Print debugging information both in unattended-upgrades and +// in unattended-upgrade-shutdown +// Unattended-Upgrade::Debug "false"; + +// Allow package downgrade if Pin-Priority exceeds 1000 +// Unattended-Upgrade::Allow-downgrade "false"; + +// When APT fails to mark a package to be upgraded or installed try adjusting +// candidates of related packages to help APT's resolver in finding a solution +// where the package can be upgraded or installed. +// This is a workaround until APT's resolver is fixed to always find a +// solution if it exists. (See Debian bug #711128.) +// The fallback is enabled by default, except on Debian's sid release because +// uninstallable packages are frequent there. +// Disabling the fallback speeds up unattended-upgrades when there are +// uninstallable packages at the expense of rarely keeping back packages which +// could be upgraded or installed. +// Unattended-Upgrade::Allow-APT-Mark-Fallback "true"; +// Pre-configure all packages with debconf before they are installed. +// If you don't like it, comment it out. 
+DPkg::Pre-Install-Pkgs {"/usr/sbin/dpkg-preconfigure --apt || true";}; +Apt::AutoRemove::SuggestsImportant "false"; +DPkg::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; }; +APT::Update::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; }; +Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache ""; +APT::Periodic::Enable "0"; +Acquire::GzipIndexes "true"; Acquire::CompressionTypes::Order:: "gz"; +Acquire::Languages "none"; diff --git a/pkg/compliance/data.go b/pkg/compliance/data.go index b2fa6a5296d02..00335c873fa1a 100644 --- a/pkg/compliance/data.go +++ b/pkg/compliance/data.go @@ -72,6 +72,15 @@ type CheckEvent struct { errReason error `json:"-"` } +type ResourceLog struct { + AgentVersion string `json:"agent_version,omitempty"` + ExpireAt time.Time `json:"expire_at,omitempty"` + ResourceType string `json:"resource_type,omitempty"` + ResourceID string `json:"resource_id,omitempty"` + ResourceData interface{} `json:"resource_data,omitempty"` + Tags []string `json:"tags"` +} + func (e *CheckEvent) String() string { s := fmt.Sprintf("%s:%s result=%s", e.FrameworkID, e.RuleID, e.Result) if e.ResourceID != "" { @@ -152,6 +161,17 @@ func NewCheckSkipped( } } +func NewResourceLog(resourceID, resourceType string, resource interface{}) *ResourceLog { + expireAt := time.Now().Add(1 * time.Hour).UTC().Truncate(1 * time.Second) + return &ResourceLog{ + AgentVersion: version.AgentVersion, + ExpireAt: expireAt, + ResourceType: resourceType, + ResourceID: resourceID, + ResourceData: resource, + } +} + type RuleScope string const ( diff --git a/pkg/compliance/evaluator_rego.go b/pkg/compliance/evaluator_rego.go index cae7646588e10..ed42e3bda0c37 100644 --- a/pkg/compliance/evaluator_rego.go +++ b/pkg/compliance/evaluator_rego.go @@ -120,6 +120,8 @@ func newCheckEventFromRegoResult(data interface{}, rule *Rule, resolvedInputs Re result = 
CheckPassed case "failing", "fail": result = CheckFailed + case "skipped": + result = CheckSkipped case "err", "error": d, _ := m["data"].(map[string]interface{}) errMsg, _ := d["error"].(string) @@ -183,6 +185,12 @@ failing_finding(resource_type, resource_id, event_data) = f { f := raw_finding("failing", resource_type, resource_id, event_data) } +skipped_finding(resource_type, resource_id, error_msg) = f { + f := raw_finding("skipped", resource_type, resource_id, { + "error": error_msg + }) +} + error_finding(resource_type, resource_id, error_msg) = f { f := raw_finding("error", resource_type, resource_id, { "error": error_msg diff --git a/pkg/compliance/evaluator_xccdf.go b/pkg/compliance/evaluator_xccdf.go index a45c3f6af4758..38b97b029bfeb 100644 --- a/pkg/compliance/evaluator_xccdf.go +++ b/pkg/compliance/evaluator_xccdf.go @@ -20,9 +20,12 @@ import ( "sync" "time" + "github.com/DataDog/datadog-agent/pkg/compliance/metrics" "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/executable" "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/version" + "github.com/DataDog/datadog-go/v5/statsd" ) const ( @@ -183,15 +186,15 @@ func (p *oscapIO) Stop() { close(p.ErrorCh) } -func EvaluateXCCDFRule(ctx context.Context, hostname string, benchmark *Benchmark, rule *Rule) []*CheckEvent { +func EvaluateXCCDFRule(ctx context.Context, hostname string, statsdClient *statsd.Client, benchmark *Benchmark, rule *Rule) []*CheckEvent { if !rule.IsXCCDF() { log.Errorf("given rule is not an XCCDF rule %s", rule.ID) return nil } - return evaluateXCCDFRule(ctx, hostname, benchmark, rule, rule.InputSpecs[0].XCCDF) + return evaluateXCCDFRule(ctx, hostname, statsdClient, benchmark, rule, rule.InputSpecs[0].XCCDF) } -func evaluateXCCDFRule(ctx context.Context, hostname string, benchmark *Benchmark, rule *Rule, spec *InputSpecXCCDF) []*CheckEvent { +func evaluateXCCDFRule(ctx context.Context, hostname string, 
statsdClient *statsd.Client, benchmark *Benchmark, rule *Rule, spec *InputSpecXCCDF) []*CheckEvent { oscapIOsMu.Lock() file := filepath.Join(benchmark.dirname, spec.Name) p := oscapIOs[file] @@ -216,6 +219,7 @@ func evaluateXCCDFRule(ctx context.Context, hostname string, benchmark *Benchmar reqs = append(reqs, &oscapIORule{Profile: spec.Profile, Rule: spec.Rule}) } + start := time.Now() for _, req := range reqs { select { case <-ctx.Done(): @@ -259,5 +263,19 @@ func evaluateXCCDFRule(ctx context.Context, hostname string, benchmark *Benchmar } } + if statsdClient != nil { + tags := []string{ + "rule_id:" + rule.ID, + "rule_input_type:xccdf", + "agent_version:" + version.AgentVersion, + } + if err := statsdClient.Count(metrics.MetricInputsHits, int64(len(reqs)), tags, 1.0); err != nil { + log.Errorf("failed to send input metric: %v", err) + } + if err := statsdClient.Timing(metrics.MetricInputsDuration, time.Since(start), tags, 1.0); err != nil { + log.Errorf("failed to send input metric: %v", err) + } + } + return events } diff --git a/pkg/compliance/k8sconfig/loader.go b/pkg/compliance/k8sconfig/loader.go new file mode 100644 index 0000000000000..0ed903e4ffd87 --- /dev/null +++ b/pkg/compliance/k8sconfig/loader.go @@ -0,0 +1,498 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package k8sconfig + +import ( + "context" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/shirou/gopsutil/v3/process" + "gopkg.in/yaml.v3" +) + +const version = "202305" + +const ( + k8sManifestsDir = "/etc/kubernetes/manifests" + k8sKubeconfigsDir = "/etc/kubernetes" +) + +type procsLoader func(ctx context.Context) []proc +type proc struct { + name string + flags map[string]string +} + +type loader struct { + hostroot string + errs []error +} + +// LoadConfiguration extracts a complete summary of all current Kubernetes +// node configuration. It does so by first looking at the running processes, +// looking up for Kubernetes related processes. For each component's process +// that were find, it collects the command line flags and associated files. +// The knowledge of each components specificities is based on the +// k8s_types_generator.go utility that encodes every relevant flags +// specificities (see types_generated.go). +func LoadConfiguration(ctx context.Context, hostroot string) (string, *K8sNodeConfig) { + l := &loader{hostroot: hostroot} + return l.load(ctx, l.loadProcesses) +} + +// NOTE(jinroh): the reason we rely on the loadProcesses argument is to simplify +// our testing to mock the process table. 
see loader_test.go +func (l *loader) load(ctx context.Context, loadProcesses procsLoader) (string, *K8sNodeConfig) { + node := K8sNodeConfig{Version: version} + + node.KubeletService = l.loadServiceFileMeta([]string{ + "/etc/systemd/system/kubelet.service.d/kubelet.conf", + "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf", + "/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf", + "/usr/lib/systemd/system/kubelet.service", + "/lib/systemd/system/kubelet.service", + }) + + node.AdminKubeconfig = l.loadKubeconfigMeta(filepath.Join(k8sKubeconfigsDir, "admin.conf")) + + node.Manifests.KubeApiserver = l.loadConfigFileMeta(filepath.Join(k8sManifestsDir, "kube-apiserver.yaml")) + node.Manifests.KubeContollerManager = l.loadConfigFileMeta(filepath.Join(k8sManifestsDir, "kube-controller-manager.yaml")) + node.Manifests.KubeScheduler = l.loadConfigFileMeta(filepath.Join(k8sManifestsDir, "kube-scheduler.yaml")) + node.Manifests.Etcd = l.loadConfigFileMeta(filepath.Join(k8sManifestsDir, "etcd.yaml")) + + if eksMeta := l.loadConfigFileMeta("/etc/eks/release"); eksMeta != nil { + node.ManagedEnvironment = &K8sManagedEnvConfig{ + Name: "eks", + Metadata: eksMeta.Content, + } + } + + for _, proc := range loadProcesses(ctx) { + switch proc.name { + case "etcd": + node.Components.Etcd = l.newK8sEtcdConfig(proc.flags) + case "kube-apiserver", "apiserver": + node.Components.KubeApiserver = l.newK8sKubeApiserverConfig(proc.flags) + case "kube-controller-manager", "kube-controller", "controller-manager": + node.Components.KubeControllerManager = l.newK8sKubeControllerManagerConfig(proc.flags) + case "kube-scheduler": + node.Components.KubeScheduler = l.newK8sKubeSchedulerConfig(proc.flags) + case "kubelet": + node.Components.Kubelet = l.newK8sKubeletConfig(proc.flags) + case "kube-proxy": + node.Components.KubeProxy = l.newK8sKubeProxyConfig(proc.flags) + } + } + + if len(l.errs) > 0 { + node.Errors = l.errs + } + + resourceType := "kubernetes_worker_node" + if 
node.Components.KubeApiserver != nil { + resourceType = "kubernetes_master_node" + } + + return resourceType, &node +} + +func (l *loader) loadMeta(name string, loadContent bool) (os.FileInfo, []byte, bool) { + name = filepath.Join(l.hostroot, name) + info, err := os.Stat(name) + if err != nil { + l.pushError(err) + return nil, nil, false + } + if loadContent && info.IsDir() { + return nil, nil, false + } + var b []byte + const maxSize = 64 * 1024 + if loadContent && info.Size() < maxSize { + f, err := os.Open(name) + if err != nil { + l.pushError(err) + } else { + b, err = ioutil.ReadAll(io.LimitReader(f, maxSize)) + if err != nil { + l.pushError(err) + } + } + } + return info, b, true +} + +func (l *loader) loadDirMeta(name string) *K8sDirMeta { + info, _, ok := l.loadMeta(name, false) + if !ok { + return nil + } + return &K8sDirMeta{ + Path: name, + Mode: uint32(info.Mode()), + } +} + +func (l *loader) loadServiceFileMeta(names []string) *K8sConfigFileMeta { + for _, name := range names { + meta := l.loadConfigFileMeta(name) + if meta != nil { + return meta + } + } + return nil +} + +func (l *loader) loadConfigFileMeta(name string) *K8sConfigFileMeta { + info, b, ok := l.loadMeta(name, true) + if !ok { + return nil + } + var content interface{} + switch filepath.Ext(name) { + case ".yaml", ".yml": + if err := yaml.Unmarshal(b, &content); err != nil { + l.pushError(err) + content = b + } + case ".json": + if err := json.Unmarshal(b, &content); err != nil { + l.pushError(err) + content = b + } + default: + content = string(b) + } + return &K8sConfigFileMeta{ + Path: name, + Mode: uint32(info.Mode()), + Content: content, + } +} + +func (l *loader) loadAdmissionConfigFileMeta(name string) *K8sAdmissionConfigFileMeta { + info, b, ok := l.loadMeta(name, true) + if !ok { + return nil + } + var content k8sAdmissionConfigSource + if err := yaml.Unmarshal(b, &content); err != nil { + l.pushError(err) + return nil + } + var result K8sAdmissionConfigFileMeta + for _, plugin 
:= range content.Plugins { + added := &K8sAdmissionPluginConfigMeta{Name: plugin.Name} + if plugin.Configuration != nil { + added.Configuration = plugin.Configuration + } else if plugin.Path != "" { + added.Configuration = l.loadConfigFileMeta(plugin.Path) + } + result.Plugins = append(result.Plugins, added) + } + result.Path = name + result.Mode = uint32(info.Mode()) + return &result +} + +func (l *loader) loadEncryptionProviderConfigFileMeta(name string) *K8sEncryptionProviderConfigFileMeta { + info, b, ok := l.loadMeta(name, true) + if ok { + return nil + } + var content K8sEncryptionProviderConfigFileMeta + if err := yaml.Unmarshal(b, &content); err != nil { + l.pushError(err) + return nil + } + content.Path = name + content.Mode = uint32(info.Mode()) + return &content +} + +func (l *loader) loadTokenFileMeta(name string) *K8sTokenFileMeta { + info, _, ok := l.loadMeta(name, false) + if ok { + return nil + } + return &K8sTokenFileMeta{ + Path: name, + Mode: uint32(info.Mode()), + } +} + +func (l *loader) loadKeyFileMeta(name string) *K8sKeyFileMeta { + info, _, ok := l.loadMeta(name, false) + if !ok { + return nil + } + var meta K8sKeyFileMeta + meta.Path = name + meta.Mode = uint32(info.Mode()) + return &meta +} + +// https://github.com/kubernetes/kubernetes/blob/ad18954259eae3db51bac2274ed4ca7304b923c4/cmd/kubeadm/test/kubeconfig/util.go#L77-L87 +func (l *loader) loadCertFileMeta(name string) *K8sCertFileMeta { + info, certData, ok := l.loadMeta(name, true) + if !ok { + return nil + } + meta := l.extractCertData(certData) + meta.Path = name + meta.Mode = uint32(info.Mode()) + return meta +} + +func (l *loader) extractCertData(certData []byte) *K8sCertFileMeta { + const CertificateBlockType = "CERTIFICATE" + certPemBlock, _ := pem.Decode(certData) + if certPemBlock == nil { + l.pushError(fmt.Errorf("could not PEM decode certificate data")) + return nil + } + if certPemBlock.Type != CertificateBlockType { + l.pushError(fmt.Errorf("decoded PEM does not start 
with correct block type")) + return nil + } + c, err := x509.ParseCertificate(certPemBlock.Bytes) + if err != nil { + l.pushError(err) + return nil + } + sn := c.SerialNumber.String() + if sn == "0" { + sn = "" + } + + h256 := sha256.New() + h256.Write(certPemBlock.Bytes) + + var data K8sCertFileMeta + data.Certificate.Fingerprint = printSHA256Fingerprint(h256.Sum(nil)) + data.Certificate.SerialNumber = sn + data.Certificate.SubjectKeyId = printColumnSeparatedHex(c.SubjectKeyId) + data.Certificate.AuthorityKeyId = printColumnSeparatedHex(c.AuthorityKeyId) + data.Certificate.CommonName = c.Subject.CommonName + data.Certificate.Organization = c.Subject.Organization + data.Certificate.DNSNames = c.DNSNames + data.Certificate.IPAddresses = c.IPAddresses + data.Certificate.NotAfter = c.NotAfter + data.Certificate.NotBefore = c.NotBefore + return &data +} + +func (l *loader) loadKubeconfigMeta(name string) *K8sKubeconfigMeta { + info, b, ok := l.loadMeta(name, true) + if !ok { + return nil + } + + var source k8SKubeconfigSource + var err error + switch filepath.Ext(name) { + case ".json": + err = json.Unmarshal(b, &source) + default: + err = yaml.Unmarshal(b, &source) + } + if err != nil { + l.pushError(err) + return nil + } + + content := &K8SKubeconfig{ + Clusters: make(map[string]*K8sKubeconfigCluster), + Users: make(map[string]*K8sKubeconfigUser), + Contexts: make(map[string]*K8sKubeconfigContext), + } + for _, cluster := range source.Clusters { + var certAuth *K8sCertFileMeta + if certAuthDataB64 := cluster.Cluster.CertificateAuthorityData; certAuthDataB64 != "" { + certAuthData, err := base64.StdEncoding.DecodeString(certAuthDataB64) + if err != nil { + l.pushError(err) + } else { + certAuth = l.extractCertData(certAuthData) + } + } else if certAuthFile := cluster.Cluster.CertificateAuthority; certAuthFile != "" { + certAuth = l.loadCertFileMeta(certAuthFile) + } + content.Clusters[cluster.Name] = &K8sKubeconfigCluster{ + Server: cluster.Cluster.Server, + 
TLSServerName: cluster.Cluster.TLSServerName, + InsecureSkipTLSVerify: cluster.Cluster.InsecureSkipTLSVerify, + CertificateAuthority: certAuth, + ProxyURL: cluster.Cluster.ProxyURL, + DisableCompression: cluster.Cluster.DisableCompression, + } + } + for _, user := range source.Users { + var clientCert *K8sCertFileMeta + var clientKey *K8sKeyFileMeta + if clientCertDataB64 := user.User.ClientCertificateData; clientCertDataB64 != "" { + clientCertDataB64, err := base64.StdEncoding.DecodeString(clientCertDataB64) + if err != nil { + l.pushError(err) + } else { + clientCert = l.extractCertData(clientCertDataB64) + } + } else if clientCertFile := user.User.ClientCertificate; clientCertFile != "" { + clientCert = l.loadCertFileMeta(clientCertFile) + } + if clientKeyFile := user.User.ClientKey; clientKeyFile != "" { + clientKey = l.loadKeyFileMeta(clientKeyFile) + } + content.Users[user.Name] = &K8sKubeconfigUser{ + UseToken: user.User.TokenFile != "" || user.User.Token != "", + UsePassword: user.User.Password != "", + Exec: user.User.Exec, + ClientCertificate: clientCert, + ClientKey: clientKey, + } + } + for _, context := range source.Contexts { + content.Contexts[context.Name] = &K8sKubeconfigContext{ + Cluster: context.Context.Cluster, + User: context.Context.User, + Namespace: context.Context.Namespace, + } + } + + return &K8sKubeconfigMeta{ + Path: name, + Mode: uint32(info.Mode()), + Kubeconfig: content, + } +} + +// in OpenSSH >= 2.6, a fingerprint is now displayed as base64 SHA256. 
+func printSHA256Fingerprint(f []byte) string { + return fmt.Sprintf("SHA256:%s", strings.TrimSuffix(base64.StdEncoding.EncodeToString(f), "=")) +} + +func printColumnSeparatedHex(d []byte) string { + h := strings.ToUpper(hex.EncodeToString(d)) + var sb strings.Builder + for i, r := range h { + sb.WriteRune(r) + if i%2 == 1 && i != len(h)-1 { + sb.WriteRune(':') + } + } + return sb.String() +} + +func (l *loader) loadProcesses(ctx context.Context) []proc { + var procs []proc + processes, err := process.ProcessesWithContext(ctx) + if err != nil { + l.pushError(err) + return nil + } + for _, p := range processes { + name, err := p.Name() + if err != nil { + l.pushError(err) + continue + } + switch name { + case "etcd", + "kube-apiserver", "apiserver", + "kube-controller-manager", "kube-controller", "controller-manager", + "kube-scheduler", "kubelet", "kube-proxy": + cmdline, err := p.CmdlineSlice() + if err != nil { + l.pushError(err) + } else { + procs = append(procs, buildProc(name, cmdline)) + } + } + } + return procs +} + +func (l *loader) pushError(err error) { + if err != nil && !os.IsNotExist(err) { + l.errs = append(l.errs, err) + } +} + +func (l *loader) parseBool(v string) bool { + if v == "" { + return true + } + b, err := strconv.ParseBool(v) + if err != nil { + l.pushError(err) + } + return b +} + +//nolint:unused,deadcode +func (l *loader) parseFloat(v string) float64 { + f, err := strconv.ParseFloat(v, 64) + if err != nil { + l.pushError(err) + } + return f +} + +func (l *loader) parseInt(v string) int { + i, err := strconv.Atoi(v) + if err != nil { + l.pushError(err) + } + return i +} + +func (l *loader) parseDuration(v string) time.Duration { + d, err := time.ParseDuration(v) + if err != nil { + l.pushError(err) + } + return d +} + +func buildProc(name string, cmdline []string) proc { + p := proc{name: name} + if len(cmdline) > 1 { + cmdline = cmdline[1:] + p.flags = make(map[string]string) + pendingFlagValue := false + for i, arg := range cmdline { 
+ if strings.HasPrefix(arg, "-") { + parts := strings.SplitN(arg, "=", 2) + if len(parts) == 2 { + p.flags[parts[0]] = parts[1] + } else { + p.flags[parts[0]] = "" + pendingFlagValue = true + } + } else { + if pendingFlagValue { + p.flags[cmdline[i-1]] = arg + } else { + p.flags[arg] = "" + } + } + } + } + return p +} diff --git a/pkg/compliance/k8sconfig/loader_test.go b/pkg/compliance/k8sconfig/loader_test.go new file mode 100644 index 0000000000000..5e25864b627a2 --- /dev/null +++ b/pkg/compliance/k8sconfig/loader_test.go @@ -0,0 +1,135 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux +// +build linux + +package k8sconfig + +import ( + "context" + "encoding/json" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +const eksProcTable = ` +kubelet --config /etc/kubernetes/kubelet/kubelet-config.json --kubeconfig /var/lib/kubelet/kubeconfig --container-runtime-endpoint unix:///run/containerd/containerd.sock --image-credential-provider-config /etc/eks/image-credential-provider/config.json --image-credential-provider-bin-dir /etc/eks/image-credential-provider --node-ip=192.168.78.181 --pod-infra-container-image=602401143452.dkr.ecr.eu-west-3.amazonaws.com/eks/pause:3.5 --v=2 --cloud-provider=aws --container-runtime=remote --node-labels=eks.amazonaws.com/sourceLaunchTemplateVersion=1,alpha.eksctl.io/cluster-name=PierreGuilleminotGravitonSandbox,alpha.eksctl.io/nodegroup-name=standard,eks.amazonaws.com/nodegroup-image=ami-09f37ddb4a6ecc85e,eks.amazonaws.com/capacityType=ON_DEMAND,eks.amazonaws.com/nodegroup=standard,eks.amazonaws.com/sourceLaunchTemplateId=lt-0df2e04572534b928 --max-pods=17 +` + +// TODO(jinroh): use testdata files +var eksFs = []*mockFile{ + { + name: 
"/etc/eks/image-credential-provider", + mode: 0755, isDir: true, + }, + { + name: "/etc/eks/image-credential-provider/config.json", + mode: 0644, + content: `{\n "apiVersion": "kubelet.config.k8s.io/v1alpha1",\n "kind": "CredentialProviderConfig",\n "providers": [\n {\n "name": "ecr-credential-provider",\n "matchImages": [\n "*.dkr.ecr.*.amazonaws.com",\n "*.dkr.ecr.*.amazonaws.com.cn",\n "*.dkr.ecr-fips.*.amazonaws.com",\n "*.dkr.ecr.us-iso-east-1.c2s.ic.gov",\n "*.dkr.ecr.us-isob-east-1.sc2s.sgov.gov"\n ],\n "defaultCacheDuration": "12h",\n "apiVersion": "credentialprovider.kubelet.k8s.io/v1alpha1"\n }\n ]\n}`, + }, + { + name: "/etc/eks/release", + mode: 0664, + content: `BASE_AMI_ID="ami-0528ac959959021be"\nBUILD_TIME="Sat May 13 01:48:34 UTC 2023"\nBUILD_KERNEL="5.10.178-162.673.amzn2.aarch64"\nARCH="aarch64"`, + }, + { + name: "/etc/kubernetes/kubelet/kubelet-config.json", + mode: 0644, + content: `{\n "kind": "KubeletConfiguration",\n "apiVersion": "kubelet.config.k8s.io/v1beta1",\n "address": "0.0.0.0",\n "authentication": {\n "anonymous": {\n "enabled": false\n },\n "webhook": {\n "cacheTTL": "2m0s",\n "enabled": true\n },\n "x509": {\n "clientCAFile": "/etc/kubernetes/pki/ca.crt"\n }\n },\n "authorization": {\n "mode": "Webhook",\n "webhook": {\n "cacheAuthorizedTTL": "5m0s",\n "cacheUnauthorizedTTL": "30s"\n }\n },\n "clusterDomain": "cluster.local",\n "hairpinMode": "hairpin-veth",\n "readOnlyPort": 0,\n "cgroupDriver": "systemd",\n "cgroupRoot": "/",\n "featureGates": {\n "RotateKubeletServerCertificate": true,\n "KubeletCredentialProviders": true\n },\n "protectKernelDefaults": true,\n "serializeImagePulls": false,\n "serverTLSBootstrap": true,\n "tlsCipherSuites": [\n "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",\n "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",\n "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",\n "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",\n "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",\n "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",\n 
"TLS_RSA_WITH_AES_256_GCM_SHA384",\n "TLS_RSA_WITH_AES_128_GCM_SHA256"\n ],\n "clusterDNS": [\n "10.100.0.10"\n ],\n "kubeAPIQPS": 10,\n "kubeAPIBurst": 20,\n "evictionHard": {\n "memory.available": "100Mi",\n "nodefs.available": "10%",\n "nodefs.inodesFree": "5%"\n },\n "kubeReserved": {\n "cpu": "70m",\n "ephemeral-storage": "1Gi",\n "memory": "442Mi"\n },\n "systemReservedCgroup": "/system",\n "kubeReservedCgroup": "/runtime"\n}`, + }, + { + name: "/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf", + mode: 0644, + content: `[Service]\nEnvironment='KUBELET_ARGS=--node-ip=192.168.78.181 --pod-infra-container-image=602401143452.dkr.ecr.eu-west-3.amazonaws.com/eks/pause:3.5 --v=2 --cloud-provider=aws --container-runtime=remote'`, + }, + { + name: "/var/lib/kubelet/kubeconfig", + mode: 0644, + content: `apiVersion: v1\nkind: Config\nclusters:\n- cluster:\n certificate-authority: /etc/kubernetes/pki/ca.crt\n server: https://1DB2F34ED30B77AFEA800D56D3EBED0B.sk1.eu-west-3.eks.amazonaws.com\n name: kubernetes\ncontexts:\n- context:\n cluster: kubernetes\n user: kubelet\n name: kubelet\ncurrent-context: kubelet\nusers:\n- name: kubelet\n user:\n exec:\n apiVersion: client.authentication.k8s.io/v1beta1\n command: /usr/bin/aws-iam-authenticator\n args:\n - "token"\n - "-i"\n - "PierreGuilleminotGravitonSandbox"\n - --region\n - "eu-west-3"`, + }, +} + +const kubadmProcTable = ` +kube-proxy --config=/var/lib/kube-proxy/config.conf --hostname-override=lima-k8s +kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///run/containerd/containerd.sock --pod-infra-container-image=registry.k8s.io/pause:3.9 +etcd --advertise-client-urls=https://192.168.5.15:2379 --cert-file=/etc/kubernetes/pki/etcd/server.crt --client-cert-auth=true --data-dir=/var/lib/etcd --experimental-initial-corrupt-check=true 
--experimental-watch-progress-notify-interval=5s --initial-advertise-peer-urls=https://192.168.5.15:2380 --initial-cluster=lima-k8s=https://192.168.5.15:2380 --key-file=/etc/kubernetes/pki/etcd/server.key --listen-client-urls=https://127.0.0.1:2379,https://192.168.5.15:2379 --listen-metrics-urls=http://127.0.0.1:2381 --listen-peer-urls=https://192.168.5.15:2380 --name=lima-k8s --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt --peer-client-cert-auth=true --peer-key-file=/etc/kubernetes/pki/etcd/peer.key --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt --snapshot-count=10000 --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt +kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf --bind-address=127.0.0.1 --client-ca-file=/etc/kubernetes/pki/ca.crt --cluster-cidr=10.244.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt --cluster-signing-key-file=/etc/kubernetes/pki/ca.key --controllers=*,bootstrapsigner,tokencleaner --kubeconfig=/etc/kubernetes/controller-manager.conf --leader-elect=true --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt --root-ca-file=/etc/kubernetes/pki/ca.crt --service-account-private-key-file=/etc/kubernetes/pki/sa.key --service-cluster-ip-range=10.96.0.0/12 --use-service-account-credentials=true +kube-scheduler --authentication-kubeconfig=/etc/kubernetes/scheduler.conf --authorization-kubeconfig=/etc/kubernetes/scheduler.conf --bind-address=127.0.0.1 --kubeconfig=/etc/kubernetes/scheduler.conf --leader-elect=true +kube-apiserver --audit-policy-file=/etc/kubernetes/audit-policy.yaml --audit-log-path=/var/log/kubernetes/audit/audit.log --advertise-address=192.168.5.15 --allow-privileged=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/pki/ca.crt --enable-admission-plugins=NodeRestriction --enable-bootstrap-token-auth=true 
--etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key --requestheader-allowed-names=front-proxy-client --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/etc/kubernetes/pki/sa.pub --service-account-signing-key-file=/etc/kubernetes/pki/sa.key --service-cluster-ip-range=10.96.0.0/12 --tls-cert-file=/etc/kubernetes/pki/apiserver.crt --tls-private-key-file=/etc/kubernetes/pki/apiserver.key +` + +func TestKubAdmConfigLoader(t *testing.T) { + var table []proc + for _, l := range strings.Split(kubadmProcTable, "\n") { + if l == "" { + continue + } + cmdline := strings.Fields(l) + table = append(table, buildProc(cmdline[0], cmdline)) + } + + tmpDir := t.TempDir() + conf := loadTestConfiguration(tmpDir, table) + assert.Empty(t, conf.Errors) +} + +func TestKubEksConfigLoader(t *testing.T) { + var table []proc + for _, l := range strings.Split(eksProcTable, "\n") { + if l == "" { + continue + } + cmdline := strings.Fields(l) + table = append(table, buildProc(cmdline[0], cmdline)) + } + tmpDir := t.TempDir() + for _, f := range eksFs { + f.create(t, tmpDir) + } + conf := loadTestConfiguration(tmpDir, table) + b, _ := json.MarshalIndent(conf, "", " ") + assert.Empty(t, conf.Errors) + assert.NotNil(t, 
conf.ManagedEnvironment) + assert.Equal(t, conf.ManagedEnvironment.Name, "eks") + assert.True(t, strings.HasPrefix(conf.ManagedEnvironment.Metadata.(string), `BASE_AMI_ID="ami-0528ac959959021be"`)) + fmt.Println(string(b)) +} + +func loadTestConfiguration(hostroot string, table []proc) *K8sNodeConfig { + l := &loader{hostroot: hostroot} + _, data := l.load(context.Background(), func(ctx context.Context) []proc { + return table + }) + return data +} + +type mockFile struct { + isDir bool + name string + mode uint32 + content string +} + +func (f *mockFile) create(t *testing.T, root string) { + if f.isDir { + if err := os.MkdirAll(filepath.Join(root, f.name), fs.FileMode(f.mode)); err != nil { + t.Fatal(err) + } + } else { + if err := os.MkdirAll(filepath.Join(root, filepath.Dir(f.name)), fs.FileMode(0755)); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(root, f.name), []byte(strings.ReplaceAll(f.content, "\\n", "\n")), os.FileMode(f.mode)); err != nil { + t.Fatal(err) + } + } +} diff --git a/pkg/compliance/k8sconfig/types.go b/pkg/compliance/k8sconfig/types.go new file mode 100644 index 0000000000000..2135546357a80 --- /dev/null +++ b/pkg/compliance/k8sconfig/types.go @@ -0,0 +1,219 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package k8sconfig + +import ( + "net" + "time" +) + +type K8sNodeConfig struct { + Version string `json:"version"` + ManagedEnvironment *K8sManagedEnvConfig `json:"managedEnvironment,omitempty"` + KubeletService *K8sConfigFileMeta `json:"kubeletService,omitempty"` + AdminKubeconfig *K8sKubeconfigMeta `json:"adminKubeconfig,omitempty"` + Components struct { + Etcd *K8sEtcdConfig `json:"etcd,omitempty"` + KubeApiserver *K8sKubeApiserverConfig `json:"kubeApiserver,omitempty"` + KubeControllerManager *K8sKubeControllerManagerConfig `json:"kubeControllerManager,omitempty"` + Kubelet *K8sKubeletConfig `json:"kubelet,omitempty"` + KubeProxy *K8sKubeProxyConfig `json:"kubeProxy,omitempty"` + KubeScheduler *K8sKubeSchedulerConfig `json:"kubeScheduler,omitempty"` + } `json:"components"` + Manifests struct { + Etcd *K8sConfigFileMeta `json:"etcd,omitempty"` + KubeContollerManager *K8sConfigFileMeta `json:"kubeContollerManager,omitempty"` + KubeApiserver *K8sConfigFileMeta `json:"kubeApiserver,omitempty"` + KubeScheduler *K8sConfigFileMeta `json:"kubeScheduler,omitempty"` + } `json:"manifests"` + Errors []error `json:"errors,omitempty"` +} + +type K8sManagedEnvConfig struct { + Name string `json:"name"` + Metadata interface{} `json:"metadata"` +} + +type K8sDirMeta struct { + Path string `json:"path"` + Mode uint32 `json:"mode"` +} + +type K8sConfigFileMeta struct { + Path string `json:"path"` + Mode uint32 `json:"mode"` + Content interface{} `json:"content"` +} + +type K8sTokenFileMeta struct { + Path string `json:"path"` + Mode uint32 `json:"mode"` +} + +// https://github.com/kubernetes/kubernetes/blob/6356023cb42d681b7ad0e6d14d1652247d75b797/staging/src/k8s.io/apiserver/pkg/apis/apiserver/types.go#L30 +type ( + k8sAdmissionConfigSource struct { + Plugins []struct { + Name string `yaml:"name"` + Path string `yaml:"path"` + Configuration interface{} `yaml:"configuration"` + } `yaml:"plugins"` + } + + K8sAdmissionPluginConfigMeta struct { + Name string `json:"name"` + 
Configuration interface{} `json:"configuration,omitempty"` + } + + K8sAdmissionConfigFileMeta struct { + Path string `json:"path,omitempty"` + Mode uint32 `json:"mode,omitempty"` + Plugins []*K8sAdmissionPluginConfigMeta `json:"plugins"` + } +) + +type K8sKubeconfigMeta struct { + Path string `json:"path,omitempty"` + Mode uint32 `json:"mode,omitempty"` + Kubeconfig interface{} `json:"kubeconfig"` +} + +type K8sKeyFileMeta struct { + Path string `json:"path,omitempty"` + Mode uint32 `json:"mode,omitempty"` +} + +type K8sCertFileMeta struct { + Path string `json:"path,omitempty"` + Mode uint32 `json:"mode,omitempty"` + Certificate struct { + Fingerprint string `json:"fingerprint"` + SerialNumber string `json:"serialNumber,omitempty"` + SubjectKeyId string `json:"subjectKeyId,omitempty"` + AuthorityKeyId string `json:"authorityKeyId,omitempty"` + CommonName string `json:"commonName"` + Organization []string `json:"organization,omitempty"` + DNSNames []string `json:"dnsNames,omitempty"` + IPAddresses []net.IP `json:"ipAddresses,omitempty"` + NotAfter time.Time `json:"notAfter"` + NotBefore time.Time `json:"notBefore"` + } `json:"certificate"` +} + +// k8SKubeconfigSource is used to parse the kubeconfig files. It is not +// exported as-is, and used to build K8sKubeconfig. 
+// https://github.com/kubernetes/kubernetes/blob/ad18954259eae3db51bac2274ed4ca7304b923c4/staging/src/k8s.io/client-go/tools/clientcmd/api/types.go#LL31C1-L55C2 +type ( + k8SKubeconfigSource struct { + Kind string `yaml:"kind,omitempty"` + APIVersion string `yaml:"apiVersion,omitempty"` + + Clusters []struct { + Name string `yaml:"name"` + Cluster k8sKubeconfigClusterSource `yaml:"cluster"` + } `yaml:"clusters"` + + Users []struct { + Name string `yaml:"name"` + User k8sKubeconfigUserSource `yaml:"user"` + } `yaml:"users"` + + Contexts []struct { + Name string `yaml:"name"` + Context k8sKubeconfigContextSource `yaml:"context"` + } `yaml:"contexts"` + + CurrentContext string `yaml:"current-context"` + } + + k8sKubeconfigClusterSource struct { + Server string `yaml:"server"` + TLSServerName string `yaml:"tls-server-name,omitempty"` + InsecureSkipTLSVerify bool `yaml:"insecure-skip-tls-verify,omitempty"` + CertificateAuthority string `yaml:"certificate-authority,omitempty"` + CertificateAuthorityData string `yaml:"certificate-authority-data,omitempty"` + ProxyURL string `yaml:"proxy-url,omitempty"` + DisableCompression bool `yaml:"disable-compression,omitempty"` + } + + k8sKubeconfigUserSource struct { + ClientCertificate string `yaml:"client-certificate,omitempty"` + ClientCertificateData string `yaml:"client-certificate-data,omitempty"` + ClientKey string `yaml:"client-key,omitempty"` + Token string `yaml:"token,omitempty"` + TokenFile string `yaml:"tokenFile,omitempty"` + Username string `yaml:"username,omitempty"` + Password string `yaml:"password,omitempty"` + Exec interface{} `yaml:"exec,omitempty"` + } + + k8sKubeconfigContextSource struct { + Cluster string `yaml:"cluster"` + User string `yaml:"user"` + Namespace string `yaml:"namespace,omitempty"` + } + + K8SKubeconfig struct { + Clusters map[string]*K8sKubeconfigCluster `json:"clusters"` + Users map[string]*K8sKubeconfigUser `json:"users"` + Contexts map[string]*K8sKubeconfigContext `json:"contexts"` + 
CurrentContext string `json:"currentContext"` + } + + K8sKubeconfigCluster struct { + Server string `json:"server"` + TLSServerName string `json:"tlsServerName,omitempty"` + InsecureSkipTLSVerify bool `json:"insecureSkipTlsVerify,omitempty"` + CertificateAuthority *K8sCertFileMeta `json:"certificateAuthority,omitempty"` + ProxyURL string `json:"proxyUrl,omitempty"` + DisableCompression bool `json:"disableCompression,omitempty"` + } + + K8sKubeconfigUser struct { + UseToken bool `json:"useToken"` + UsePassword bool `json:"usePassword"` + Exec interface{} `json:"exec"` + ClientCertificate *K8sCertFileMeta `json:"clientCertificate,omitempty"` + ClientKey *K8sKeyFileMeta `json:"clientKey,omitempty"` + } + + K8sKubeconfigContext struct { + Cluster string `json:"cluster"` + User string `json:"user"` + Namespace string `json:"namespace,omitempty"` + } +) + +// https://github.com/kubernetes/kubernetes/blob/e1ad9bee5bba8fbe85a6bf6201379ce8b1a611b1/staging/src/k8s.io/apiserver/pkg/apis/config/types.go#L70 +type ( + K8sEncryptionProviderConfigFileMeta struct { + Path string `json:"path,omitempty"` + Mode uint32 `json:"mode,omitempty"` + Resources []struct { + Resources []string `yaml:"resources" json:"resources"` + Providers []struct { + AESGCM *K8sEncryptionProviderKeysSource `yaml:"aesgcm,omitempty" json:"aesgcm,omitempty"` + AESCBC *K8sEncryptionProviderKeysSource `yaml:"aescbc,omitempty" json:"aescbc,omitempty"` + Secretbox *K8sEncryptionProviderKeysSource `yaml:"secretbox,omitempty" json:"secretbox,omitempty"` + Identity *struct{} `yaml:"identity,omitempty" json:"identity,omitempty"` + KMS *K8sEncryptionProviderKMSSource `yaml:"kms,omitempty" json:"kms,omitempty"` + } `yaml:"providers" json:"providers"` + } `yaml:"resources" json:"resources"` + } + + K8sEncryptionProviderKMSSource struct { + Name string `yaml:"name" json:"name"` + Endpoint string `yaml:"endpoint" json:"endpoint"` + CacheSize int `yaml:"cachesize" json:"cachesize"` + Timeout string `yaml:"timeout" 
json:"timeout"` + } + + K8sEncryptionProviderKeysSource struct { + Keys []struct { + Name string `yaml:"name" json:"name"` + } `yaml:"keys" json:"keys"` + } +) diff --git a/pkg/compliance/k8sconfig/types_generated.go b/pkg/compliance/k8sconfig/types_generated.go new file mode 100644 index 0000000000000..1dfa415047dd5 --- /dev/null +++ b/pkg/compliance/k8sconfig/types_generated.go @@ -0,0 +1,777 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// !!! +// This is a generated file: regenerate with go run ./pkg/compliance/tools/k8s_types_generator.go +// !!! +package k8sconfig + +import ( + "strings" + "time" +) + +type K8sKubeApiserverConfig struct { + AdmissionControlConfigFile *K8sAdmissionConfigFileMeta `json:"admission-control-config-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + AllowPrivileged bool `json:"allow-privileged"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + AnonymousAuth bool `json:"anonymous-auth"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + AuditLogMaxage int `json:"audit-log-maxage"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + AuditLogMaxbackup int `json:"audit-log-maxbackup"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + AuditLogMaxsize int `json:"audit-log-maxsize"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + AuditLogPath string `json:"audit-log-path"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + AuditPolicyFile *K8sConfigFileMeta `json:"audit-policy-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + AuthorizationMode []string `json:"authorization-mode"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + BindAddress string `json:"bind-address"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ClientCaFile *K8sCertFileMeta `json:"client-ca-file"` // versions: v1.26.3, v1.25.8, 
v1.24.12, v1.23.17 + DisableAdmissionPlugins []string `json:"disable-admission-plugins"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + EnableAdmissionPlugins []string `json:"enable-admission-plugins"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + EnableBootstrapTokenAuth bool `json:"enable-bootstrap-token-auth"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + EncryptionProviderConfig *K8sEncryptionProviderConfigFileMeta `json:"encryption-provider-config"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + EtcdCafile *K8sCertFileMeta `json:"etcd-cafile"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + EtcdCertfile *K8sCertFileMeta `json:"etcd-certfile"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + EtcdKeyfile *K8sKeyFileMeta `json:"etcd-keyfile"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + FeatureGates string `json:"feature-gates"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + KubeletCertificateAuthority *K8sCertFileMeta `json:"kubelet-certificate-authority"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + KubeletClientCertificate *K8sCertFileMeta `json:"kubelet-client-certificate"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + KubeletClientKey *K8sKeyFileMeta `json:"kubelet-client-key"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + Profiling bool `json:"profiling"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ProxyClientCertFile *K8sCertFileMeta `json:"proxy-client-cert-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ProxyClientKeyFile *K8sKeyFileMeta `json:"proxy-client-key-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RequestTimeout time.Duration `json:"request-timeout"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RequestheaderAllowedNames []string `json:"requestheader-allowed-names"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RequestheaderClientCaFile *K8sCertFileMeta `json:"requestheader-client-ca-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + 
RequestheaderExtraHeadersPrefix []string `json:"requestheader-extra-headers-prefix"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RequestheaderGroupHeaders []string `json:"requestheader-group-headers"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RequestheaderUsernameHeaders []string `json:"requestheader-username-headers"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + SecurePort int `json:"secure-port"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ServiceAccountIssuer string `json:"service-account-issuer"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ServiceAccountKeyFile *K8sKeyFileMeta `json:"service-account-key-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ServiceAccountLookup bool `json:"service-account-lookup"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ServiceAccountSigningKeyFile *K8sKeyFileMeta `json:"service-account-signing-key-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ServiceClusterIpRange string `json:"service-cluster-ip-range"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + TlsCertFile *K8sCertFileMeta `json:"tls-cert-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + TlsCipherSuites []string `json:"tls-cipher-suites"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + TlsPrivateKeyFile *K8sKeyFileMeta `json:"tls-private-key-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + TokenAuthFile *K8sTokenFileMeta `json:"token-auth-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + SkippedFlags map[string]string `json:"skippedFlags,omitempty"` +} + +func (l *loader) newK8sKubeApiserverConfig(flags map[string]string) *K8sKubeApiserverConfig { + if flags == nil { + return nil + } + var res K8sKubeApiserverConfig + if v, ok := flags["--admission-control-config-file"]; ok { + delete(flags, "--admission-control-config-file") + res.AdmissionControlConfigFile = l.loadAdmissionConfigFileMeta(v) + } + if v, ok := flags["--allow-privileged"]; ok { + delete(flags, 
"--allow-privileged") + res.AllowPrivileged = l.parseBool(v) + } + if v, ok := flags["--anonymous-auth"]; ok { + delete(flags, "--anonymous-auth") + res.AnonymousAuth = l.parseBool(v) + } else { + res.AnonymousAuth = l.parseBool("true") + } + if v, ok := flags["--audit-log-maxage"]; ok { + delete(flags, "--audit-log-maxage") + res.AuditLogMaxage = l.parseInt(v) + } else { + res.AuditLogMaxage = l.parseInt("0") + } + if v, ok := flags["--audit-log-maxbackup"]; ok { + delete(flags, "--audit-log-maxbackup") + res.AuditLogMaxbackup = l.parseInt(v) + } else { + res.AuditLogMaxbackup = l.parseInt("0") + } + if v, ok := flags["--audit-log-maxsize"]; ok { + delete(flags, "--audit-log-maxsize") + res.AuditLogMaxsize = l.parseInt(v) + } else { + res.AuditLogMaxsize = l.parseInt("0") + } + if v, ok := flags["--audit-log-path"]; ok { + delete(flags, "--audit-log-path") + res.AuditLogPath = v + } + if v, ok := flags["--audit-policy-file"]; ok { + delete(flags, "--audit-policy-file") + res.AuditPolicyFile = l.loadConfigFileMeta(v) + } + if v, ok := flags["--authorization-mode"]; ok { + delete(flags, "--authorization-mode") + res.AuthorizationMode = strings.Split(v, ",") + } else { + res.AuthorizationMode = strings.Split("AlwaysAllow", ",") + } + if v, ok := flags["--bind-address"]; ok { + delete(flags, "--bind-address") + res.BindAddress = v + } else { + res.BindAddress = "0.0.0.0" + } + if v, ok := flags["--client-ca-file"]; ok { + delete(flags, "--client-ca-file") + res.ClientCaFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--disable-admission-plugins"]; ok { + delete(flags, "--disable-admission-plugins") + res.DisableAdmissionPlugins = strings.Split(v, ",") + } + if v, ok := flags["--enable-admission-plugins"]; ok { + delete(flags, "--enable-admission-plugins") + res.EnableAdmissionPlugins = strings.Split(v, ",") + } + if v, ok := flags["--enable-bootstrap-token-auth"]; ok { + delete(flags, "--enable-bootstrap-token-auth") + res.EnableBootstrapTokenAuth = 
l.parseBool(v) + } + if v, ok := flags["--encryption-provider-config"]; ok { + delete(flags, "--encryption-provider-config") + res.EncryptionProviderConfig = l.loadEncryptionProviderConfigFileMeta(v) + } + if v, ok := flags["--etcd-cafile"]; ok { + delete(flags, "--etcd-cafile") + res.EtcdCafile = l.loadCertFileMeta(v) + } + if v, ok := flags["--etcd-certfile"]; ok { + delete(flags, "--etcd-certfile") + res.EtcdCertfile = l.loadCertFileMeta(v) + } + if v, ok := flags["--etcd-keyfile"]; ok { + delete(flags, "--etcd-keyfile") + res.EtcdKeyfile = l.loadKeyFileMeta(v) + } + if v, ok := flags["--feature-gates"]; ok { + delete(flags, "--feature-gates") + res.FeatureGates = v + } + if v, ok := flags["--kubelet-certificate-authority"]; ok { + delete(flags, "--kubelet-certificate-authority") + res.KubeletCertificateAuthority = l.loadCertFileMeta(v) + } + if v, ok := flags["--kubelet-client-certificate"]; ok { + delete(flags, "--kubelet-client-certificate") + res.KubeletClientCertificate = l.loadCertFileMeta(v) + } + if v, ok := flags["--kubelet-client-key"]; ok { + delete(flags, "--kubelet-client-key") + res.KubeletClientKey = l.loadKeyFileMeta(v) + } + if v, ok := flags["--profiling"]; ok { + delete(flags, "--profiling") + res.Profiling = l.parseBool(v) + } else { + res.Profiling = l.parseBool("true") + } + if v, ok := flags["--proxy-client-cert-file"]; ok { + delete(flags, "--proxy-client-cert-file") + res.ProxyClientCertFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--proxy-client-key-file"]; ok { + delete(flags, "--proxy-client-key-file") + res.ProxyClientKeyFile = l.loadKeyFileMeta(v) + } + if v, ok := flags["--request-timeout"]; ok { + delete(flags, "--request-timeout") + res.RequestTimeout = l.parseDuration(v) + } else { + res.RequestTimeout = l.parseDuration("1m0s") + } + if v, ok := flags["--requestheader-allowed-names"]; ok { + delete(flags, "--requestheader-allowed-names") + res.RequestheaderAllowedNames = strings.Split(v, ",") + } + if v, ok := 
flags["--requestheader-client-ca-file"]; ok { + delete(flags, "--requestheader-client-ca-file") + res.RequestheaderClientCaFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--requestheader-extra-headers-prefix"]; ok { + delete(flags, "--requestheader-extra-headers-prefix") + res.RequestheaderExtraHeadersPrefix = strings.Split(v, ",") + } + if v, ok := flags["--requestheader-group-headers"]; ok { + delete(flags, "--requestheader-group-headers") + res.RequestheaderGroupHeaders = strings.Split(v, ",") + } + if v, ok := flags["--requestheader-username-headers"]; ok { + delete(flags, "--requestheader-username-headers") + res.RequestheaderUsernameHeaders = strings.Split(v, ",") + } + if v, ok := flags["--secure-port"]; ok { + delete(flags, "--secure-port") + res.SecurePort = l.parseInt(v) + } else { + res.SecurePort = l.parseInt("6443") + } + if v, ok := flags["--service-account-issuer"]; ok { + delete(flags, "--service-account-issuer") + res.ServiceAccountIssuer = v + } + if v, ok := flags["--service-account-key-file"]; ok { + delete(flags, "--service-account-key-file") + res.ServiceAccountKeyFile = l.loadKeyFileMeta(v) + } + if v, ok := flags["--service-account-lookup"]; ok { + delete(flags, "--service-account-lookup") + res.ServiceAccountLookup = l.parseBool(v) + } else { + res.ServiceAccountLookup = l.parseBool("true") + } + if v, ok := flags["--service-account-signing-key-file"]; ok { + delete(flags, "--service-account-signing-key-file") + res.ServiceAccountSigningKeyFile = l.loadKeyFileMeta(v) + } + if v, ok := flags["--service-cluster-ip-range"]; ok { + delete(flags, "--service-cluster-ip-range") + res.ServiceClusterIpRange = v + } + if v, ok := flags["--tls-cert-file"]; ok { + delete(flags, "--tls-cert-file") + res.TlsCertFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--tls-cipher-suites"]; ok { + delete(flags, "--tls-cipher-suites") + res.TlsCipherSuites = strings.Split(v, ",") + } + if v, ok := flags["--tls-private-key-file"]; ok { + delete(flags, 
"--tls-private-key-file") + res.TlsPrivateKeyFile = l.loadKeyFileMeta(v) + } + if v, ok := flags["--token-auth-file"]; ok { + delete(flags, "--token-auth-file") + res.TokenAuthFile = l.loadTokenFileMeta(v) + } + if len(flags) > 0 { + res.SkippedFlags = flags + } + return &res +} + +type K8sKubeSchedulerConfig struct { + AuthenticationKubeconfig *K8sKubeconfigMeta `json:"authentication-kubeconfig"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + AuthorizationKubeconfig string `json:"authorization-kubeconfig"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + BindAddress string `json:"bind-address"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ClientCaFile *K8sCertFileMeta `json:"client-ca-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + Config *K8sConfigFileMeta `json:"config"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + FeatureGates string `json:"feature-gates"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + Kubeconfig *K8sKubeconfigMeta `json:"kubeconfig"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + Profiling bool `json:"profiling"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RequestheaderAllowedNames []string `json:"requestheader-allowed-names"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RequestheaderClientCaFile *K8sCertFileMeta `json:"requestheader-client-ca-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RequestheaderExtraHeadersPrefix []string `json:"requestheader-extra-headers-prefix"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RequestheaderGroupHeaders []string `json:"requestheader-group-headers"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RequestheaderUsernameHeaders []string `json:"requestheader-username-headers"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + SecurePort int `json:"secure-port"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + TlsCertFile *K8sCertFileMeta `json:"tls-cert-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + 
TlsCipherSuites []string `json:"tls-cipher-suites"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + TlsPrivateKeyFile *K8sKeyFileMeta `json:"tls-private-key-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + SkippedFlags map[string]string `json:"skippedFlags,omitempty"` +} + +func (l *loader) newK8sKubeSchedulerConfig(flags map[string]string) *K8sKubeSchedulerConfig { + if flags == nil { + return nil + } + var res K8sKubeSchedulerConfig + if v, ok := flags["--authentication-kubeconfig"]; ok { + delete(flags, "--authentication-kubeconfig") + res.AuthenticationKubeconfig = l.loadKubeconfigMeta(v) + } + if v, ok := flags["--authorization-kubeconfig"]; ok { + delete(flags, "--authorization-kubeconfig") + res.AuthorizationKubeconfig = v + } + if v, ok := flags["--bind-address"]; ok { + delete(flags, "--bind-address") + res.BindAddress = v + } else { + res.BindAddress = "0.0.0.0" + } + if v, ok := flags["--client-ca-file"]; ok { + delete(flags, "--client-ca-file") + res.ClientCaFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--config"]; ok { + delete(flags, "--config") + res.Config = l.loadConfigFileMeta(v) + } + if v, ok := flags["--feature-gates"]; ok { + delete(flags, "--feature-gates") + res.FeatureGates = v + } + if v, ok := flags["--kubeconfig"]; ok { + delete(flags, "--kubeconfig") + res.Kubeconfig = l.loadKubeconfigMeta(v) + } + if v, ok := flags["--profiling"]; ok { + delete(flags, "--profiling") + res.Profiling = l.parseBool(v) + } else { + res.Profiling = l.parseBool("true") + } + if v, ok := flags["--requestheader-allowed-names"]; ok { + delete(flags, "--requestheader-allowed-names") + res.RequestheaderAllowedNames = strings.Split(v, ",") + } + if v, ok := flags["--requestheader-client-ca-file"]; ok { + delete(flags, "--requestheader-client-ca-file") + res.RequestheaderClientCaFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--requestheader-extra-headers-prefix"]; ok { + delete(flags, "--requestheader-extra-headers-prefix") + 
res.RequestheaderExtraHeadersPrefix = strings.Split(v, ",") + } else { + res.RequestheaderExtraHeadersPrefix = strings.Split("x-remote-extra-", ",") + } + if v, ok := flags["--requestheader-group-headers"]; ok { + delete(flags, "--requestheader-group-headers") + res.RequestheaderGroupHeaders = strings.Split(v, ",") + } else { + res.RequestheaderGroupHeaders = strings.Split("x-remote-group", ",") + } + if v, ok := flags["--requestheader-username-headers"]; ok { + delete(flags, "--requestheader-username-headers") + res.RequestheaderUsernameHeaders = strings.Split(v, ",") + } else { + res.RequestheaderUsernameHeaders = strings.Split("x-remote-user", ",") + } + if v, ok := flags["--secure-port"]; ok { + delete(flags, "--secure-port") + res.SecurePort = l.parseInt(v) + } else { + res.SecurePort = l.parseInt("10259") + } + if v, ok := flags["--tls-cert-file"]; ok { + delete(flags, "--tls-cert-file") + res.TlsCertFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--tls-cipher-suites"]; ok { + delete(flags, "--tls-cipher-suites") + res.TlsCipherSuites = strings.Split(v, ",") + } + if v, ok := flags["--tls-private-key-file"]; ok { + delete(flags, "--tls-private-key-file") + res.TlsPrivateKeyFile = l.loadKeyFileMeta(v) + } + if len(flags) > 0 { + res.SkippedFlags = flags + } + return &res +} + +type K8sKubeControllerManagerConfig struct { + AuthenticationKubeconfig *K8sKubeconfigMeta `json:"authentication-kubeconfig"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + AuthorizationKubeconfig string `json:"authorization-kubeconfig"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + BindAddress string `json:"bind-address"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ClientCaFile *K8sCertFileMeta `json:"client-ca-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ClusterSigningCertFile *K8sCertFileMeta `json:"cluster-signing-cert-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ClusterSigningKeyFile *K8sKeyFileMeta 
`json:"cluster-signing-key-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + FeatureGates string `json:"feature-gates"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + Kubeconfig *K8sKubeconfigMeta `json:"kubeconfig"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + Profiling bool `json:"profiling"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RequestheaderAllowedNames []string `json:"requestheader-allowed-names"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RequestheaderClientCaFile *K8sCertFileMeta `json:"requestheader-client-ca-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RequestheaderExtraHeadersPrefix []string `json:"requestheader-extra-headers-prefix"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RequestheaderGroupHeaders []string `json:"requestheader-group-headers"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RequestheaderUsernameHeaders []string `json:"requestheader-username-headers"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RootCaFile *K8sCertFileMeta `json:"root-ca-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + SecurePort int `json:"secure-port"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ServiceAccountPrivateKeyFile *K8sKeyFileMeta `json:"service-account-private-key-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ServiceClusterIpRange string `json:"service-cluster-ip-range"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + TerminatedPodGcThreshold int `json:"terminated-pod-gc-threshold"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + TlsCertFile *K8sCertFileMeta `json:"tls-cert-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + TlsCipherSuites []string `json:"tls-cipher-suites"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + TlsPrivateKeyFile *K8sKeyFileMeta `json:"tls-private-key-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + UseServiceAccountCredentials bool `json:"use-service-account-credentials"` // versions: v1.26.3, 
v1.25.8, v1.24.12, v1.23.17 + SkippedFlags map[string]string `json:"skippedFlags,omitempty"` +} + +func (l *loader) newK8sKubeControllerManagerConfig(flags map[string]string) *K8sKubeControllerManagerConfig { + if flags == nil { + return nil + } + var res K8sKubeControllerManagerConfig + if v, ok := flags["--authentication-kubeconfig"]; ok { + delete(flags, "--authentication-kubeconfig") + res.AuthenticationKubeconfig = l.loadKubeconfigMeta(v) + } + if v, ok := flags["--authorization-kubeconfig"]; ok { + delete(flags, "--authorization-kubeconfig") + res.AuthorizationKubeconfig = v + } + if v, ok := flags["--bind-address"]; ok { + delete(flags, "--bind-address") + res.BindAddress = v + } else { + res.BindAddress = "0.0.0.0" + } + if v, ok := flags["--client-ca-file"]; ok { + delete(flags, "--client-ca-file") + res.ClientCaFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--cluster-signing-cert-file"]; ok { + delete(flags, "--cluster-signing-cert-file") + res.ClusterSigningCertFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--cluster-signing-key-file"]; ok { + delete(flags, "--cluster-signing-key-file") + res.ClusterSigningKeyFile = l.loadKeyFileMeta(v) + } + if v, ok := flags["--feature-gates"]; ok { + delete(flags, "--feature-gates") + res.FeatureGates = v + } + if v, ok := flags["--kubeconfig"]; ok { + delete(flags, "--kubeconfig") + res.Kubeconfig = l.loadKubeconfigMeta(v) + } + if v, ok := flags["--profiling"]; ok { + delete(flags, "--profiling") + res.Profiling = l.parseBool(v) + } else { + res.Profiling = l.parseBool("true") + } + if v, ok := flags["--requestheader-allowed-names"]; ok { + delete(flags, "--requestheader-allowed-names") + res.RequestheaderAllowedNames = strings.Split(v, ",") + } + if v, ok := flags["--requestheader-client-ca-file"]; ok { + delete(flags, "--requestheader-client-ca-file") + res.RequestheaderClientCaFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--requestheader-extra-headers-prefix"]; ok { + delete(flags, 
"--requestheader-extra-headers-prefix") + res.RequestheaderExtraHeadersPrefix = strings.Split(v, ",") + } else { + res.RequestheaderExtraHeadersPrefix = strings.Split("x-remote-extra-", ",") + } + if v, ok := flags["--requestheader-group-headers"]; ok { + delete(flags, "--requestheader-group-headers") + res.RequestheaderGroupHeaders = strings.Split(v, ",") + } else { + res.RequestheaderGroupHeaders = strings.Split("x-remote-group", ",") + } + if v, ok := flags["--requestheader-username-headers"]; ok { + delete(flags, "--requestheader-username-headers") + res.RequestheaderUsernameHeaders = strings.Split(v, ",") + } else { + res.RequestheaderUsernameHeaders = strings.Split("x-remote-user", ",") + } + if v, ok := flags["--root-ca-file"]; ok { + delete(flags, "--root-ca-file") + res.RootCaFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--secure-port"]; ok { + delete(flags, "--secure-port") + res.SecurePort = l.parseInt(v) + } else { + res.SecurePort = l.parseInt("10257") + } + if v, ok := flags["--service-account-private-key-file"]; ok { + delete(flags, "--service-account-private-key-file") + res.ServiceAccountPrivateKeyFile = l.loadKeyFileMeta(v) + } + if v, ok := flags["--service-cluster-ip-range"]; ok { + delete(flags, "--service-cluster-ip-range") + res.ServiceClusterIpRange = v + } + if v, ok := flags["--terminated-pod-gc-threshold"]; ok { + delete(flags, "--terminated-pod-gc-threshold") + res.TerminatedPodGcThreshold = l.parseInt(v) + } else { + res.TerminatedPodGcThreshold = l.parseInt("12500") + } + if v, ok := flags["--tls-cert-file"]; ok { + delete(flags, "--tls-cert-file") + res.TlsCertFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--tls-cipher-suites"]; ok { + delete(flags, "--tls-cipher-suites") + res.TlsCipherSuites = strings.Split(v, ",") + } + if v, ok := flags["--tls-private-key-file"]; ok { + delete(flags, "--tls-private-key-file") + res.TlsPrivateKeyFile = l.loadKeyFileMeta(v) + } + if v, ok := flags["--use-service-account-credentials"]; 
ok { + delete(flags, "--use-service-account-credentials") + res.UseServiceAccountCredentials = l.parseBool(v) + } + if len(flags) > 0 { + res.SkippedFlags = flags + } + return &res +} + +type K8sKubeProxyConfig struct { + BindAddress string `json:"bind-address"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + Config *K8sConfigFileMeta `json:"config"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + FeatureGates string `json:"feature-gates"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + HostnameOverride string `json:"hostname-override"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + Kubeconfig *K8sKubeconfigMeta `json:"kubeconfig"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + Profiling bool `json:"profiling"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + SkippedFlags map[string]string `json:"skippedFlags,omitempty"` +} + +func (l *loader) newK8sKubeProxyConfig(flags map[string]string) *K8sKubeProxyConfig { + if flags == nil { + return nil + } + var res K8sKubeProxyConfig + if v, ok := flags["--bind-address"]; ok { + delete(flags, "--bind-address") + res.BindAddress = v + } else { + res.BindAddress = "0.0.0.0" + } + if v, ok := flags["--config"]; ok { + delete(flags, "--config") + res.Config = l.loadConfigFileMeta(v) + } + if v, ok := flags["--feature-gates"]; ok { + delete(flags, "--feature-gates") + res.FeatureGates = v + } + if v, ok := flags["--hostname-override"]; ok { + delete(flags, "--hostname-override") + res.HostnameOverride = v + } + if v, ok := flags["--kubeconfig"]; ok { + delete(flags, "--kubeconfig") + res.Kubeconfig = l.loadKubeconfigMeta(v) + } + if v, ok := flags["--profiling"]; ok { + delete(flags, "--profiling") + res.Profiling = l.parseBool(v) + } + if len(flags) > 0 { + res.SkippedFlags = flags + } + return &res +} + +type K8sKubeletConfig struct { + Address string `json:"address"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + AnonymousAuth bool `json:"anonymous-auth"` // versions: v1.26.3, v1.25.8, v1.24.12, 
v1.23.17 + AuthorizationMode string `json:"authorization-mode"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ClientCaFile *K8sCertFileMeta `json:"client-ca-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + Config *K8sConfigFileMeta `json:"config"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + EventBurst int `json:"event-burst"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + EventQps int `json:"event-qps"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + FeatureGates string `json:"feature-gates"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + HostnameOverride string `json:"hostname-override"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ImageCredentialProviderBinDir *K8sDirMeta `json:"image-credential-provider-bin-dir"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ImageCredentialProviderConfig *K8sConfigFileMeta `json:"image-credential-provider-config"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + Kubeconfig *K8sKubeconfigMeta `json:"kubeconfig"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + MakeIptablesUtilChains bool `json:"make-iptables-util-chains"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + MaxPods int `json:"max-pods"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + PodMaxPids int `json:"pod-max-pids"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ProtectKernelDefaults bool `json:"protect-kernel-defaults"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + ReadOnlyPort int `json:"read-only-port"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RotateCertificates bool `json:"rotate-certificates"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + RotateServerCertificates bool `json:"rotate-server-certificates"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + StreamingConnectionIdleTimeout time.Duration `json:"streaming-connection-idle-timeout"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + TlsCertFile *K8sCertFileMeta `json:"tls-cert-file"` // versions: v1.26.3, 
v1.25.8, v1.24.12, v1.23.17 + TlsCipherSuites []string `json:"tls-cipher-suites"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + TlsPrivateKeyFile *K8sKeyFileMeta `json:"tls-private-key-file"` // versions: v1.26.3, v1.25.8, v1.24.12, v1.23.17 + SkippedFlags map[string]string `json:"skippedFlags,omitempty"` +} + +func (l *loader) newK8sKubeletConfig(flags map[string]string) *K8sKubeletConfig { + if flags == nil { + return nil + } + var res K8sKubeletConfig + if v, ok := flags["--address"]; ok { + delete(flags, "--address") + res.Address = v + } else { + res.Address = "0.0.0.0" + } + if v, ok := flags["--anonymous-auth"]; ok { + delete(flags, "--anonymous-auth") + res.AnonymousAuth = l.parseBool(v) + } else { + res.AnonymousAuth = l.parseBool("true") + } + if v, ok := flags["--authorization-mode"]; ok { + delete(flags, "--authorization-mode") + res.AuthorizationMode = v + } else { + res.AuthorizationMode = "AlwaysAllow" + } + if v, ok := flags["--client-ca-file"]; ok { + delete(flags, "--client-ca-file") + res.ClientCaFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--config"]; ok { + delete(flags, "--config") + res.Config = l.loadConfigFileMeta(v) + } + if v, ok := flags["--event-burst"]; ok { + delete(flags, "--event-burst") + res.EventBurst = l.parseInt(v) + } else { + res.EventBurst = l.parseInt("10") + } + if v, ok := flags["--event-qps"]; ok { + delete(flags, "--event-qps") + res.EventQps = l.parseInt(v) + } else { + res.EventQps = l.parseInt("5") + } + if v, ok := flags["--feature-gates"]; ok { + delete(flags, "--feature-gates") + res.FeatureGates = v + } + if v, ok := flags["--hostname-override"]; ok { + delete(flags, "--hostname-override") + res.HostnameOverride = v + } + if v, ok := flags["--image-credential-provider-bin-dir"]; ok { + delete(flags, "--image-credential-provider-bin-dir") + res.ImageCredentialProviderBinDir = l.loadDirMeta(v) + } + if v, ok := flags["--image-credential-provider-config"]; ok { + delete(flags, 
"--image-credential-provider-config") + res.ImageCredentialProviderConfig = l.loadConfigFileMeta(v) + } + if v, ok := flags["--kubeconfig"]; ok { + delete(flags, "--kubeconfig") + res.Kubeconfig = l.loadKubeconfigMeta(v) + } + if v, ok := flags["--make-iptables-util-chains"]; ok { + delete(flags, "--make-iptables-util-chains") + res.MakeIptablesUtilChains = l.parseBool(v) + } else { + res.MakeIptablesUtilChains = l.parseBool("true") + } + if v, ok := flags["--max-pods"]; ok { + delete(flags, "--max-pods") + res.MaxPods = l.parseInt(v) + } else { + res.MaxPods = l.parseInt("110") + } + if v, ok := flags["--pod-max-pids"]; ok { + delete(flags, "--pod-max-pids") + res.PodMaxPids = l.parseInt(v) + } else { + res.PodMaxPids = l.parseInt("-1") + } + if v, ok := flags["--protect-kernel-defaults"]; ok { + delete(flags, "--protect-kernel-defaults") + res.ProtectKernelDefaults = l.parseBool(v) + } + if v, ok := flags["--read-only-port"]; ok { + delete(flags, "--read-only-port") + res.ReadOnlyPort = l.parseInt(v) + } else { + res.ReadOnlyPort = l.parseInt("10255") + } + if v, ok := flags["--rotate-certificates"]; ok { + delete(flags, "--rotate-certificates") + res.RotateCertificates = l.parseBool(v) + } + if v, ok := flags["--rotate-server-certificates"]; ok { + delete(flags, "--rotate-server-certificates") + res.RotateServerCertificates = l.parseBool(v) + } + if v, ok := flags["--streaming-connection-idle-timeout"]; ok { + delete(flags, "--streaming-connection-idle-timeout") + res.StreamingConnectionIdleTimeout = l.parseDuration(v) + } else { + res.StreamingConnectionIdleTimeout = l.parseDuration("4h0m0s") + } + if v, ok := flags["--tls-cert-file"]; ok { + delete(flags, "--tls-cert-file") + res.TlsCertFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--tls-cipher-suites"]; ok { + delete(flags, "--tls-cipher-suites") + res.TlsCipherSuites = strings.Split(v, ",") + } + if v, ok := flags["--tls-private-key-file"]; ok { + delete(flags, "--tls-private-key-file") + 
res.TlsPrivateKeyFile = l.loadKeyFileMeta(v) + } + if len(flags) > 0 { + res.SkippedFlags = flags + } + return &res +} + +type K8sEtcdConfig struct { + AutoTls bool `json:"auto-tls"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 + CertFile *K8sCertFileMeta `json:"cert-file"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 + ClientCertAuth bool `json:"client-cert-auth"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 + DataDir *K8sDirMeta `json:"data-dir"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 + KeyFile *K8sKeyFileMeta `json:"key-file"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 + PeerAutoTls bool `json:"peer-auto-tls"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 + PeerCertFile *K8sCertFileMeta `json:"peer-cert-file"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 + PeerClientCertAuth bool `json:"peer-client-cert-auth"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 + PeerKeyFile *K8sKeyFileMeta `json:"peer-key-file"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 + PeerTrustedCaFile *K8sCertFileMeta `json:"peer-trusted-ca-file"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 + TrustedCaFile *K8sCertFileMeta `json:"trusted-ca-file"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 + SkippedFlags map[string]string `json:"skippedFlags,omitempty"` +} + +func (l *loader) newK8sEtcdConfig(flags map[string]string) *K8sEtcdConfig { + if flags == nil { + return nil + } + var res K8sEtcdConfig + if v, ok := flags["--auto-tls"]; ok { + delete(flags, "--auto-tls") + res.AutoTls = l.parseBool(v) + } + if v, ok := flags["--cert-file"]; ok { + delete(flags, "--cert-file") + res.CertFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--client-cert-auth"]; ok { + delete(flags, "--client-cert-auth") + res.ClientCertAuth = l.parseBool(v) + } + if v, ok := flags["--data-dir"]; ok { + delete(flags, "--data-dir") + res.DataDir = l.loadDirMeta(v) + } + if v, ok := flags["--key-file"]; ok { + delete(flags, "--key-file") + res.KeyFile = l.loadKeyFileMeta(v) + } + if v, ok := 
flags["--peer-auto-tls"]; ok { + delete(flags, "--peer-auto-tls") + res.PeerAutoTls = l.parseBool(v) + } + if v, ok := flags["--peer-cert-file"]; ok { + delete(flags, "--peer-cert-file") + res.PeerCertFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--peer-client-cert-auth"]; ok { + delete(flags, "--peer-client-cert-auth") + res.PeerClientCertAuth = l.parseBool(v) + } + if v, ok := flags["--peer-key-file"]; ok { + delete(flags, "--peer-key-file") + res.PeerKeyFile = l.loadKeyFileMeta(v) + } + if v, ok := flags["--peer-trusted-ca-file"]; ok { + delete(flags, "--peer-trusted-ca-file") + res.PeerTrustedCaFile = l.loadCertFileMeta(v) + } + if v, ok := flags["--trusted-ca-file"]; ok { + delete(flags, "--trusted-ca-file") + res.TrustedCaFile = l.loadCertFileMeta(v) + } + if len(flags) > 0 { + res.SkippedFlags = flags + } + return &res +} diff --git a/pkg/compliance/reporter.go b/pkg/compliance/reporter.go index e32e6783bb4dc..d7c7a8cd220f4 100644 --- a/pkg/compliance/reporter.go +++ b/pkg/compliance/reporter.go @@ -6,6 +6,7 @@ package compliance import ( + "encoding/json" "time" coreconfig "github.com/DataDog/datadog-agent/pkg/config" @@ -16,21 +17,17 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/logs/pipeline" "github.com/DataDog/datadog-agent/pkg/logs/sources" + "github.com/DataDog/datadog-agent/pkg/security/common" "github.com/DataDog/datadog-agent/pkg/status/health" + "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/startstop" ) -// Reporter is the interface of the output structure that the agent will send -// the resulting evaluation results in a marshalled form. 
-type Reporter interface { - Endpoints() *config.Endpoints - ReportRaw(content []byte, service string, tags ...string) -} - type LogReporter struct { logSource *sources.LogSource logChan chan *message.Message endpoints *config.Endpoints + tags []string } // NewLogReporter instantiates a new log LogReporter @@ -57,11 +54,13 @@ func NewLogReporter(stopper startstop.Stopper, sourceName, sourceType, runPath s }, ) logChan := pipelineProvider.NextPipelineChan() + tags := []string{common.QueryAccountIdTag()} return &LogReporter{ logSource: logSource, logChan: logChan, endpoints: endpoints, + tags: tags, }, nil } @@ -69,12 +68,14 @@ func (r *LogReporter) Endpoints() *config.Endpoints { return r.endpoints } -func (r *LogReporter) ReportRaw(content []byte, service string, tags ...string) { +func (r *LogReporter) ReportEvent(event interface{}) { + buf, err := json.Marshal(event) + if err != nil { + log.Errorf("failed to serialize compliance event: %v", err) + return + } origin := message.NewOrigin(r.logSource) - origin.SetTags(tags) - origin.SetService(service) - msg := message.NewMessage(content, origin, message.StatusInfo, time.Now().UnixNano()) + origin.SetTags(r.tags) + msg := message.NewMessage(buf, origin, message.StatusInfo, time.Now().UnixNano()) r.logChan <- msg } - -var _ Reporter = &LogReporter{} diff --git a/pkg/compliance/resolver.go b/pkg/compliance/resolver.go index 6ecd544bfb13e..5e5cfe8683653 100644 --- a/pkg/compliance/resolver.go +++ b/pkg/compliance/resolver.go @@ -69,7 +69,7 @@ func DefaultLinuxAuditProvider(ctx context.Context) (LinuxAuditClient, error) { type ResolverOptions struct { Hostname string HostRoot string - StatsdClient statsd.ClientInterface + StatsdClient *statsd.Client DockerProvider KubernetesProvider @@ -221,7 +221,9 @@ func (r *defaultResolver) ResolveInputs(ctx_ context.Context, rule *Rule) (Resol } resolvingContext.InputSpecs[tagName] = spec - resolvingContext.KubernetesCluster = kubernetesCluster + if kubernetesCluster != "" { + 
resolvingContext.KubernetesCluster = kubernetesCluster + } if r, ok := result.([]interface{}); ok && reflect.ValueOf(r).IsNil() { result = nil @@ -337,7 +339,7 @@ func (r *defaultResolver) resolveFile(ctx context.Context, spec InputSpecFile) ( } else if strings.Contains(path, "*") { result, err = r.resolveFileGlob(ctx, path, spec.Parser) } else { - result, err = r.resolveFilePath(ctx, "", path, spec.Parser) + result, err = r.resolveFilePath(ctx, path, spec.Parser) } if errors.Is(err, os.ErrPermission) || errors.Is(err, os.ErrNotExist) || @@ -347,7 +349,7 @@ func (r *defaultResolver) resolveFile(ctx context.Context, spec InputSpecFile) ( return } -func (r *defaultResolver) resolveFilePath(ctx context.Context, glob, path, parser string) (interface{}, error) { +func (r *defaultResolver) resolveFilePath(ctx context.Context, path, parser string) (interface{}, error) { path = r.pathNormalizeToHostRoot(path) file, err := r.getFileMeta(path) if err != nil { @@ -377,7 +379,7 @@ func (r *defaultResolver) resolveFilePath(ctx context.Context, glob, path, parse } return map[string]interface{}{ "path": r.pathRelativeToHostRoot(path), - "glob": glob, + "glob": "", "permissions": file.perms, "user": file.user, "group": file.group, @@ -399,20 +401,20 @@ func (r *defaultResolver) resolveFileFromProcessFlag(ctx context.Context, name, } } if proc == nil { - return nil, fmt.Errorf("could not find process %q for file content", name) + return nil, nil } cmdLine, err := proc.CmdlineSlice() if err != nil { - return nil, fmt.Errorf("could not get cmdline value for process %q: %w", name, err) + return nil, nil } flags := parseCmdlineFlags(cmdLine) path, ok := flags[flag] if !ok { - return nil, fmt.Errorf("process %q has no flag %q", name, flag) + return nil, nil } - return r.resolveFilePath(ctx, "", path, parser) + return r.resolveFilePath(ctx, path, parser) } func (r *defaultResolver) resolveFileGlob(ctx context.Context, glob, parser string) (interface{}, error) { @@ -420,10 +422,13 @@ func 
(r *defaultResolver) resolveFileGlob(ctx context.Context, glob, parser stri var resolved []interface{} for _, path := range paths { path = r.pathRelativeToHostRoot(path) - file, err := r.resolveFilePath(ctx, glob, path, parser) + file, err := r.resolveFilePath(ctx, path, parser) if err != nil { continue } + if f, ok := file.(map[string]interface{}); ok { + f["glob"] = glob + } resolved = append(resolved, file) } return resolved, nil diff --git a/pkg/compliance/tests/helpers.go b/pkg/compliance/tests/helpers.go index 041ff4c04a2c5..0d742b495b6ae 100644 --- a/pkg/compliance/tests/helpers.go +++ b/pkg/compliance/tests/helpers.go @@ -290,10 +290,6 @@ func (c *assertedRule) Report(event *compliance.CheckEvent) { c.events = append(c.events, event) } -func (c *assertedRule) ReportRaw(content []byte, service string, tags ...string) { - panic("should not have been called") -} - func buildSuite(name string, rules ...*assertedRule) string { const suiteTpl = `schema: version: 1.0.0 diff --git a/pkg/compliance/tests/process_test.go b/pkg/compliance/tests/process_test.go index 60b6dfdee2d09..30d99ce0822e8 100644 --- a/pkg/compliance/tests/process_test.go +++ b/pkg/compliance/tests/process_test.go @@ -194,9 +194,7 @@ valid(p) { } findings[f] { - count(input.process) == 2 - valid(input.process[0]) - valid(input.process[1]) + count([p | p := input.process[_]; valid(p)]) == 2 f := dd.passed_finding( "sleep", "sleep", diff --git a/pkg/compliance/tools/.gitignore b/pkg/compliance/tools/.gitignore new file mode 100644 index 0000000000000..e660fd93d3196 --- /dev/null +++ b/pkg/compliance/tools/.gitignore @@ -0,0 +1 @@ +bin/ diff --git a/pkg/compliance/tools/k8s_types_generator.go b/pkg/compliance/tools/k8s_types_generator.go new file mode 100644 index 0000000000000..06d3f40284921 --- /dev/null +++ b/pkg/compliance/tools/k8s_types_generator.go @@ -0,0 +1,781 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package main + +import ( + "bufio" + "bytes" + "fmt" + "io" + "log" + "net" + "net/http" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode" + + "golang.org/x/exp/slices" +) + +var ( + bindir = "" + arch = "" + + k8sComponents = []string{"kube-apiserver", "kube-scheduler", "kube-controller-manager", "kube-proxy", "kubelet"} + + // https://kubernetes.io/releases/ + k8sVersions = []string{ + "v1.26.3", + "v1.25.8", + "v1.24.12", + "v1.23.17", + } + + // https://github.com/kubernetes/kubernetes/blob/c3e7eca7fd38454200819b60e58144d5727f1bbc/cluster/images/etcd/Makefile#L18 + // "v3.0.17", "v3.1.20" removed because they do not have ARM64 tarballs + etcdVersions = []string{ + "v3.5.7", + "v3.4.18", + "v3.3.17", + "v3.2.32", + } + + knownFlags = []string{ + "--address", + "--admission-control-config-file", + "--allow-privileged", + "--anonymous-auth", + "--audit-log-maxage", + "--audit-log-maxbackup", + "--audit-log-maxsize", + "--audit-log-path", + "--audit-policy-file", + "--authentication-kubeconfig", + "--authorization-kubeconfig", + "--authorization-mode", + "--auto-tls", + "--bind-address", + "--cert-file", + "--client-ca-file", + "--client-cert-auth", + "--cluster-signing-cert-file", + "--cluster-signing-key-file", + "--config", + "--data-dir", + "--disable-admission-plugins", + "--enable-admission-plugins", + "--enable-bootstrap-token-auth", + "--encryption-provider-config", + "--etcd-cafile", + "--etcd-certfile", + "--etcd-keyfile", + "--event-burst", + "--event-qps", + "--feature-gates", + "--hostname-override", + "--image-credential-provider-bin-dir", + "--image-credential-provider-config", + "--key-file", + "--kubeconfig", + "--kubelet-certificate-authority", + "--kubelet-client-certificate", + "--kubelet-client-key", + "--make-iptables-util-chains", + "--max-pods", + 
"--peer-auto-tls", + "--peer-cert-file", + "--peer-client-cert-auth", + "--peer-key-file", + "--peer-trusted-ca-file", + "--pod-max-pids", + "--profiling", + "--protect-kernel-defaults", + "--proxy-client-cert-file", + "--proxy-client-key-file", + "--read-only-port", + "--request-timeout", + "--requestheader-allowed-names", + "--requestheader-client-ca-file", + "--requestheader-extra-headers-prefix", + "--requestheader-group-headers", + "--requestheader-username-headers", + "--root-ca-file", + "--rotate-certificates", + "--rotate-server-certificates", + "--secure-port", + "--service-account-issuer", + "--service-account-key-file", + "--service-account-lookup", + "--service-account-private-key-file", + "--service-account-signing-key-file", + "--service-cluster-ip-range", + "--streaming-connection-idle-timeout", + "--terminated-pod-gc-threshold", + "--tls-cert-file", + "--tls-cipher-suites", + "--tls-private-key-file", + "--token-auth-file", + "--trusted-ca-file", + "--use-service-account-credentials", + } +) + +const preamble = `// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// !!! +// This is a generated file: regenerate with go run ./pkg/compliance/tools/k8s_types_generator.go +// !!! 
+package k8sconfig + +import ( + "time" + "strings" +) +` + +type conf struct { + versions []string + flagName string + flagType string + flagDefault string + goType string +} + +type komponent struct { + name string + version string + confs []*conf +} + +// go run ./pkg/compliance/tools/k8s_types_generator.go ./pkg/compliance/tools/bin | gofmt > ./pkg/compliance/k8sconfig/types_generated.go +func main() { + dir, _ := os.Getwd() + if len(os.Args) < 2 { + fmt.Println("generator ") + log.Fatal("missing bindir path") + } + bindir = filepath.Join(dir, os.Args[1]) + if info, err := os.Stat(bindir); err != nil || !info.IsDir() { + log.Fatalf("bindir path %s is not a directory", bindir) + } + uname, _ := exec.Command("uname", "-m").Output() + switch string(bytes.TrimSuffix(uname, []byte("\n"))) { + case "x86_64": + arch = "amd64" + case "aarch64": + arch = "arm64" + default: + log.Fatalf("could not resolve arch=%s", uname) + } + + fmt.Print(preamble) + var allKomponents []*komponent + for _, component := range k8sComponents { + var komponents []*komponent + for _, version := range k8sVersions { + komp := downloadKubeComponentAndExtractFlags(component, version) + komponents = append(komponents, komp) + } + mergedKomp := unionKomponents(komponents...) + allKomponents = append(allKomponents, mergedKomp) + fmt.Println(printKomponentCode(mergedKomp)) + } + + { + var komponents []*komponent + for _, version := range etcdVersions { + komp := downloadEtcdAndExtractFlags(version) + komponents = append(komponents, komp) + } + mergedKomp := unionKomponents(komponents...) + allKomponents = append(allKomponents, mergedKomp) + fmt.Println(printKomponentCode(mergedKomp)) + } + + var knownFlagsClone []string + knownFlagsClone = append(knownFlagsClone, knownFlags...) 
+ for _, komponent := range allKomponents { + for _, conf := range komponent.confs { + i := slices.Index(knownFlagsClone, "--"+conf.flagName) + if i >= 0 { + knownFlagsClone = append(knownFlagsClone[:i], knownFlagsClone[i+1:]...) + } + } + } + if len(knownFlagsClone) > 0 { + panic(fmt.Errorf("these flags were not found: %v", knownFlagsClone)) + } +} + +func defaultedType(conf *conf) *conf { + if conf.flagName == "kubeconfig" || conf.flagName == "authentication-kubeconfig" { + conf.flagType = "kubeconfig" + } else if conf.flagType == "string" || conf.flagType == "stringArray" { + switch { + case strings.Contains(conf.flagName, "cert"), + strings.Contains(conf.flagName, "cafile"), + strings.Contains(conf.flagName, "ca-file"): + conf.flagType = "certificate_file" + case strings.HasSuffix(conf.flagName, "keyfile"), + strings.HasSuffix(conf.flagName, "key-file"), + strings.HasSuffix(conf.flagName, "key"): + conf.flagType = "key_file" + case strings.Contains(conf.flagName, "token"): + conf.flagType = "token_file" + case conf.flagName == "encryption-provider-config": + conf.flagType = "encryption_config_file" + case conf.flagName == "admission-control-config-file": + conf.flagType = "admission_config_file" + case conf.flagName == "config", conf.flagName == "audit-policy-file", conf.flagName == "image-credential-provider-config": + conf.flagType = "config_file" + case strings.Contains(conf.flagName, "dir"): + conf.flagType = "dir" + } + } + + switch conf.flagType { + case "bool": + conf.flagDefault = parseTypeBool(conf.flagDefault) + conf.goType = "bool" + case "cidrs": + conf.flagDefault = parseTypeCIDRs(conf.flagDefault) + conf.goType = "string" + case "duration": + conf.flagDefault = parseTypeDuration(conf.flagDefault) + conf.goType = "time.Duration" + case "float", "float32": + conf.flagDefault = parseTypeFloat(conf.flagDefault) + conf.goType = "float64" + case "int", "int32", "quantity", "uint": + conf.flagDefault = parseTypeNumber(conf.flagDefault) + conf.goType = 
"int" + case "ip", "ipport": + conf.flagDefault = parseTypeIP(conf.flagDefault) + conf.goType = "string" + case "mapStringBool": + conf.flagDefault = parseEmptyDefault(conf.flagDefault) + conf.goType = "string" + case "mapStringString": + conf.flagDefault = parseEmptyDefault(conf.flagDefault) + conf.goType = "string" + case "namedCertKey": + conf.flagDefault = parseEmptyDefault(conf.flagDefault) + conf.goType = "string" + case "portRange": + conf.flagDefault = parseTypeRange(conf.flagDefault) + conf.goType = "string" + case "severity": + conf.flagDefault = parseTypeNumber(conf.flagDefault) + conf.goType = "int" + case "string", + "LocalMode", "ProxyMode", "RuntimeDefault": // https://kubernetes.io/docs/reference/config-api/kube-proxy-config.v1alpha1/#kubeproxy-config-k8s-io-v1alpha1-LocalMode + conf.flagDefault = parseTypeString(conf.flagDefault) + conf.goType = "string" + case "kubeconfig": + conf.flagDefault = parseTypeString(conf.flagDefault) + conf.goType = "*K8sKubeconfigMeta" + case "certificate_file": + conf.flagDefault = parseTypeString(conf.flagDefault) + conf.goType = "*K8sCertFileMeta" + case "key_file": + conf.flagDefault = parseTypeString(conf.flagDefault) + conf.goType = "*K8sKeyFileMeta" + case "config_file": + conf.flagDefault = parseTypeString(conf.flagDefault) + conf.goType = "*K8sConfigFileMeta" + case "admission_config_file": + conf.flagDefault = parseTypeString(conf.flagDefault) + conf.goType = "*K8sAdmissionConfigFileMeta" + case "encryption_config_file": + conf.flagDefault = parseTypeString(conf.flagDefault) + conf.goType = "*K8sEncryptionProviderConfigFileMeta" + case "token_file": + conf.flagDefault = parseTypeString(conf.flagDefault) + conf.goType = "*K8sTokenFileMeta" + case "dir": + conf.flagDefault = parseTypeString(conf.flagDefault) + conf.goType = "*K8sDirMeta" + case "strings", "moduleSpec": + conf.flagDefault = parseTypeStringsArray(conf.flagDefault) + conf.goType = "[]string" + case "stringToString", "stringArray": + 
conf.flagDefault = parseEmptyDefault(conf.flagDefault) + conf.goType = "string" + case "traceLocation": + conf.flagDefault = parseEmptyDefault(conf.flagDefault) + conf.goType = "string" + case "colonSeparatedMultimapStringString": + // k8s.io/component-base/cli/flag + conf.flagDefault = parseEmptyDefault(conf.flagDefault) + conf.goType = "string" + } + if conf.flagDefault == "${name}.etcd" { + conf.flagDefault = "" + } + if conf.goType == "" { + log.Fatalf("unknown type for flag %q: %s (%q)", conf.flagName, conf.flagType, conf.flagDefault) + } + return conf +} + +func unionKomponents(ks ...*komponent) *komponent { + var confs []*conf + for _, k := range ks { + for _, newConf := range k.confs { + var conf *conf + for _, c := range confs { + if c.flagName == newConf.flagName { + conf = c + } + } + if conf == nil { + confs = append(confs, newConf) + conf = newConf + } else { + if conf.flagType != newConf.flagType { + panic("TODO: different types across versions") + } + } + conf.versions = append(conf.versions, k.version) + } + } + sort.Slice(confs, func(i, j int) bool { + return strings.Compare(confs[i].flagName, confs[j].flagName) < 0 + }) + return &komponent{ + name: ks[0].name, + version: ks[0].version, + confs: confs, + } +} + +func printKomponentCode(komp *komponent) string { + printAssignment := func(c *conf, v string) string { + switch c.goType { + case "string", "ip": + return fmt.Sprintf("res.%s = %s", toGoField(c.flagName), v) + case "bool": + return fmt.Sprintf("res.%s = l.parseBool(%s)", toGoField(c.flagName), v) + case "float64": + return fmt.Sprintf("res.%s = l.parseFloat(%s, 64)", toGoField(c.flagName), v) + case "int", "uint": + return fmt.Sprintf("res.%s = l.parseInt(%s)", toGoField(c.flagName), v) + case "time.Duration": + return fmt.Sprintf("res.%s = l.parseDuration(%s)", toGoField(c.flagName), v) + case "[]string": + return fmt.Sprintf("res.%s = strings.Split(%s, \",\")", toGoField(c.flagName), v) + case "*K8sKubeconfigMeta": + return 
fmt.Sprintf("res.%s = l.loadKubeconfigMeta(%s)", toGoField(c.flagName), v) + case "*K8sCertFileMeta": + return fmt.Sprintf("res.%s = l.loadCertFileMeta(%s)", toGoField(c.flagName), v) + case "*K8sKeyFileMeta": + return fmt.Sprintf("res.%s = l.loadKeyFileMeta(%s)", toGoField(c.flagName), v) + case "*K8sTokenFileMeta": + return fmt.Sprintf("res.%s = l.loadTokenFileMeta(%s)", toGoField(c.flagName), v) + case "*K8sConfigFileMeta": + return fmt.Sprintf("res.%s = l.loadConfigFileMeta(%s)", toGoField(c.flagName), v) + case "*K8sAdmissionConfigFileMeta": + return fmt.Sprintf("res.%s = l.loadAdmissionConfigFileMeta(%s)", toGoField(c.flagName), v) + case "*K8sEncryptionProviderConfigFileMeta": + return fmt.Sprintf("res.%s = l.loadEncryptionProviderConfigFileMeta(%s)", toGoField(c.flagName), v) + case "*K8sDirMeta": + return fmt.Sprintf("res.%s = l.loadDirMeta(%s)", toGoField(c.flagName), v) + default: + panic(fmt.Errorf("non supported type %s %s %s %q for with default %s", komp.name, komp.version, c.flagName, c.goType, c.flagDefault)) + } + } + + goStructName := strings.ReplaceAll(strings.Title(komp.name), "-", "") + s := "" + s += fmt.Sprintf("type K8s%sConfig struct {\n", goStructName) + for _, c := range komp.confs { + if !isKnownFlag(c.flagName) { + continue + } + s += fmt.Sprintf(" %s %s `json:\"%s\"` // versions: %s\n", + toGoField(c.flagName), c.goType, toGoJSONTag(c.flagName), strings.Join(c.versions, ", ")) + } + s += fmt.Sprint(" SkippedFlags map[string]string `json:\"skippedFlags,omitempty\"`\n") + s += "}\n" + s += fmt.Sprintf("func (l *loader) newK8s%sConfig(flags map[string]string) *K8s%sConfig {\n", goStructName, goStructName) + s += fmt.Sprintf("if (flags == nil) { return nil }\n") + s += fmt.Sprintf("var res K8s%sConfig\n", goStructName) + for _, c := range komp.confs { + if !isKnownFlag(c.flagName) { + continue + } + s += fmt.Sprintf("if v, ok := flags[\"--%s\"]; ok {\n", c.flagName) + s += fmt.Sprintf("delete(flags, \"--%s\")\n", c.flagName) + s += 
printAssignment(c, "v") + if c.flagDefault != "" { + s += fmt.Sprintf("\n} else {\n") + s += printAssignment(c, fmt.Sprintf("%q", c.flagDefault)) + } + s += fmt.Sprintf("}\n") + } + s += fmt.Sprintf("if len(flags) > 0 { res.SkippedFlags = flags }\n") + s += fmt.Sprintf("return &res\n") + s += fmt.Sprintf("}\n") + return s +} + +func downloadEtcdAndExtractFlags(componentVersion string) *komponent { + componentName := "etcd" + componentBin := path.Join(bindir, fmt.Sprintf("%s-%s", componentName, componentVersion)) + componentUrl := fmt.Sprintf("https://github.com/etcd-io/etcd/releases/download/%s/etcd-%s-linux-%s.tar.gz", + componentVersion, componentVersion, arch) + + if _, err := os.Stat(componentBin); os.IsNotExist(err) { + fmt.Fprintf(os.Stderr, "downloading %s into %s", componentUrl, componentBin) + if err := download(componentUrl, componentBin); err != nil { + log.Fatal(err) + } + } + + if err := os.Chmod(componentBin, 0770); err != nil { + log.Fatal(err) + } + + cmd := exec.Command(componentBin, "-h") + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "ETCD_UNSUPPORTED_ARCH=arm64") + out, err := cmd.Output() + if err != nil { + log.Fatal(err) + } + var confs []*conf + scanner := bufio.NewScanner(bytes.NewReader(out)) + for scanner.Scan() { + line := scanner.Text() + conf, ok := scanEtcdHelpLine(line) + if ok { + confs = append(confs, defaultedType(conf)) + } + } + return &komponent{ + name: componentName, + version: componentVersion, + confs: confs, + } +} + +func downloadKubeComponentAndExtractFlags(componentName, componentVersion string) *komponent { + componentBin := path.Join(bindir, fmt.Sprintf("%s-%s", componentName, componentVersion)) + componentUrl := fmt.Sprintf("https://dl.k8s.io/%s/bin/linux/%s/%s", + componentVersion, arch, componentName) + if _, err := os.Stat(componentBin); os.IsNotExist(err) { + fmt.Fprintf(os.Stderr, "downloading %s into %s", componentUrl, componentBin) + if err := download(componentUrl, componentBin); err != nil { + 
log.Fatal(err) + } + } + + if err := os.Chmod(componentBin, 0770); err != nil { + log.Fatal(err) + } + + cmd := exec.Command(componentBin, "-h") + out, err := cmd.Output() + if err != nil { + log.Fatal(err) + } + + var confs []*conf + scanner := bufio.NewScanner(bytes.NewReader(out)) + for scanner.Scan() { + line := scanner.Text() + conf, ok := scanK8sHelpLine(line) + if ok { + confs = append(confs, defaultedType(conf)) + } + } + + return &komponent{ + name: componentName, + version: componentVersion, + confs: confs, + } +} + +func toGoField(s string) string { + return strings.ReplaceAll(strings.Title(s), "-", "") +} + +func toGoJSONTag(s string) string { + return s +} + +func scanEtcdHelpLine(line string) (*conf, bool) { + var conf conf + var ok bool + + str := eatWhitespace(line) + conf.flagName, str, ok = eatRegexp(str, "--([a-zA-Z0-9-]+)", 1) + if !ok { + return nil, false + } + if conf.flagName == "log-rotation-config-json" { // json flag + return nil, false + } + if strings.HasPrefix(conf.flagName, "experimental-") { + return nil, false + } + + str = eatWhitespace(str) + if str == "" { + conf.flagDefault = "false" + conf.flagType = "bool" + return &conf, true + } + + conf.flagDefault, str, ok = eatRegexp(str, "'(true|false)'", 1) + if ok { + conf.flagType = "bool" + conf.goType = "bool" + return &conf, true + } + conf.flagDefault, str, ok = eatRegexp(str, "([0-9]+)", 1) + if ok { + conf.flagType = "int" + conf.goType = "int" + return &conf, true + } + conf.flagDefault, _, ok = eatRegexp(str, "'(\\S*)'", 1) + if ok { + conf.flagType = "string" + conf.goType = "string" + return &conf, true + } + + log.Fatalf("could not flag line: %s", line) + return nil, false +} + +func scanK8sHelpLine(line string) (*conf, bool) { + var conf conf + var ok bool + + str := eatWhitespace(line) + conf.flagName, str, ok = eatRegexp(str, "--([a-zA-Z0-9-]+)", 1) + if !ok { + return nil, false + } + + str = eatWhitespace(str) + conf.flagType, str, ok = eatRegexp(str, "([a-zA-Z0-9]+)[ 
]{3,}", 1) + if ok { + str = eatWhitespace(str) + } + + if idx := strings.Index(str, "[default="); idx >= 0 { + conf.flagDefault = scanDefaultValue(str[idx+len("[default="):], '[', ']') + } else if idx := strings.Index(str, "[default "); idx >= 0 { + conf.flagDefault = scanDefaultValue(str[idx+len("[default "):], '[', ']') + } else if idx := strings.Index(str, "(default "); idx >= 0 { + conf.flagDefault = scanDefaultValue(str[idx+len("(default "):], '(', ')') + } + if conf.flagType == "" { + conf.flagType = "bool" + } + return &conf, true +} + +func scanDefaultValue(str string, op, cl rune) string { + var length int + balance := 1 + for _, r := range str { + if r == op { + balance++ + } else if r == cl { + balance-- + } + length++ + if balance == 0 { + break + } + } + val := str[:length] + val = strings.TrimPrefix(val, string(op)+"default") + val = strings.TrimSuffix(val, string(cl)) + return strings.TrimSpace(val) +} + +func parseTypeBool(str string) string { + if str == "" { + str = "false" + } + b, err := strconv.ParseBool(str) + if err != nil { + log.Fatal(err) + } + if b { + return "true" + } + return "" +} + +func parseTypeCIDRs(str string) string { + var cidrs []string + for _, s := range strings.Split(str, ",") { + s = strings.TrimSpace(s) + _, _, err := net.ParseCIDR(s) + if err != nil { + log.Fatal(err) + } + cidrs = append(cidrs, s) + } + return strings.Join(cidrs, ",") +} + +func parseTypeDuration(str string) string { + if str == "" { + str = "0" + } + _, err := time.ParseDuration(str) + if err != nil { + log.Fatal(err) + } + return str +} + +func parseTypeFloat(str string) string { + if str == "" { + str = "0.0" + } + _, err := strconv.ParseFloat(str, 64) + if err != nil { + log.Fatal(err) + } + return str +} + +func parseTypeNumber(str string) string { + if str == "" { + str = "0" + } + _, err := strconv.Atoi(str) + if err != nil { + log.Fatal(err) + } + return str +} + +func parseTypeIP(str string) string { + ip := net.ParseIP(str) + return 
ip.String() +} + +func parseTypeRange(str string) string { + r := regexp.MustCompile("^[0-9]+-[0-9]+$") + if !r.MatchString(str) { + log.Fatalf("bad range type default %q", str) + } + return str +} + +func parseTypeString(str string) string { + if strings.HasPrefix(str, "\"") { + if !strings.HasSuffix(str, "\"") { + log.Fatalf("bad string type default %q", str) + } + return str[1 : len(str)-1] + } + if strings.HasPrefix(str, "'") { + if !strings.HasSuffix(str, "'") { + log.Fatalf("bad string type default %q", str) + } + return str[1 : len(str)-1] + } + return str +} + +func parseTypeStringsArray(str string) string { + if strings.HasPrefix(str, "[") { + if !strings.HasSuffix(str, "]") { + log.Fatalf("bad string type default %q", str) + } + str = str[1 : len(str)-1] + } + strs := strings.Split(str, ",") + for i, str := range strs { + strs[i] = strings.TrimSpace(str) + } + return strings.Join(strs, ",") +} + +func parseEmptyDefault(str string) string { + if str == "imagefs.available<15%,memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%" { + // special case for deprecated flag with said default kubelet --eviction-hard + return "" + } + if str != "" && str != "[]" && str != "none" && str != ":0" { + log.Fatalf("bad empty type default %q", str) + } + return "" +} + +func eatWhitespace(str string) string { + i := 0 + for _, r := range str { + if !unicode.IsSpace(r) { + break + } + i++ + } + return str[i:] +} + +func eatRegexp(str, reg string, group int) (string, string, bool) { + r := regexp.MustCompile("^" + reg) + loc := r.FindStringSubmatchIndex(str) + if loc != nil { + if loc[0] != 0 { + panic("programmer error") + } + if len(loc)/2 < group { + panic("programmer error") + } + return str[loc[2*group]:loc[2*group+1]], str[loc[1]:], true + } + return "", str, false +} + +func download(url, dist string) error { + resp, err := http.Get(url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return 
fmt.Errorf("could not download %s: err %d", url, resp.StatusCode) + } + f, err := os.Create(dist) + if err != nil { + return err + } + defer f.Close() + _, err = io.Copy(f, resp.Body) + return err +} + +func isKnownFlag(flag string) bool { + for _, cisFlag := range knownFlags { + if "--"+flag == cisFlag { + return true + } + } + return false +} diff --git a/pkg/config/autodiscovery/autodiscovery.go b/pkg/config/autodiscovery/autodiscovery.go index 73eba8449bfeb..d3f714f3bd8a1 100644 --- a/pkg/config/autodiscovery/autodiscovery.go +++ b/pkg/config/autodiscovery/autodiscovery.go @@ -73,8 +73,9 @@ func DiscoverComponentsFromEnv() ([]config.ConfigurationProviders, []config.List detectedListeners := []config.Listeners{} // When using automatic discovery of providers/listeners - // We automatically activate the environment listener + // We automatically activate the environment and static config listener detectedListeners = append(detectedListeners, config.Listeners{Name: "environment"}) + detectedListeners = append(detectedListeners, config.Listeners{Name: "static config"}) // Automatic handling of AD providers/listeners should only run in Core agent. 
if flavor.GetFlavor() != flavor.DefaultAgent { diff --git a/pkg/config/config.go b/pkg/config/config.go index d2bfe8603cd3c..1c7b0cba4e5d4 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -518,6 +518,11 @@ func InitConfig(config Config) { config.BindEnvAndSetDefault("dogstatsd_stats_port", 5000) config.BindEnvAndSetDefault("dogstatsd_stats_enable", false) config.BindEnvAndSetDefault("dogstatsd_stats_buffer", 10) + // Control how dogstatsd-stats logs can be generated + config.BindEnvAndSetDefault("dogstatsd_log_file", "") + config.BindEnvAndSetDefault("dogstatsd_logging_enabled", true) + config.BindEnvAndSetDefault("dogstatsd_log_file_max_rolls", 3) + config.BindEnvAndSetDefault("dogstatsd_log_file_max_size", "10Mb") // Control for how long counter would be sampled to 0 if not received config.BindEnvAndSetDefault("dogstatsd_expiry_seconds", 300) // Control how long we keep dogstatsd contexts in memory. @@ -543,6 +548,7 @@ func InitConfig(config Config) { config.BindEnvAndSetDefault("dogstatsd_no_aggregation_pipeline", true) // How many metrics maximum in payloads sent by the no-aggregation pipeline to the intake. config.BindEnvAndSetDefault("dogstatsd_no_aggregation_pipeline_batch_size", 2048) + config.BindEnvAndSetDefault("dogstatsd_max_metrics_tags", 0) // 0 = disabled. // To enable the following feature, GODEBUG must contain `madvdontneed=1` config.BindEnvAndSetDefault("dogstatsd_mem_based_rate_limiter.enabled", false) @@ -557,6 +563,13 @@ func InitConfig(config Config) { config.BindEnvAndSetDefault("dogstatsd_mem_based_rate_limiter.soft_limit_freeos_check.max", 0.1) config.BindEnvAndSetDefault("dogstatsd_mem_based_rate_limiter.soft_limit_freeos_check.factor", 1.5) + config.BindEnvAndSetDefault("dogstatsd_context_limiter.limit", 0) // 0 = disabled. 
+ config.BindEnvAndSetDefault("dogstatsd_context_limiter.entry_timeout", 1) // number of flush intervals + config.BindEnvAndSetDefault("dogstatsd_context_limiter.key_tag_name", "pod_name") + config.BindEnvAndSetDefault("dogstatsd_context_limiter.telemetry_tag_names", []string{}) + config.BindEnvAndSetDefault("dogstatsd_context_limiter.bytes_per_context", 1500) + config.BindEnvAndSetDefault("dogstatsd_context_limiter.cgroup_memory_ratio", 0.0) + config.BindEnv("dogstatsd_mapper_profiles") config.SetEnvKeyTransformer("dogstatsd_mapper_profiles", func(in string) interface{} { var mappings []MappingProfile @@ -997,6 +1010,8 @@ func InitConfig(config Config) { config.BindEnvAndSetDefault("cluster_checks.extra_tags", []string{}) config.BindEnvAndSetDefault("cluster_checks.advanced_dispatching_enabled", false) config.BindEnvAndSetDefault("cluster_checks.clc_runners_port", 5005) + config.BindEnvAndSetDefault("cluster_checks.exclude_checks", []string{}) + // Cluster check runner config.BindEnvAndSetDefault("clc_runner_enabled", false) config.BindEnvAndSetDefault("clc_runner_id", "") @@ -1044,6 +1059,7 @@ func InitConfig(config Config) { // This create a lot of billable custom metrics. 
config.BindEnvAndSetDefault("telemetry.enabled", false) config.BindEnvAndSetDefault("telemetry.dogstatsd_origin", false) + config.BindEnvAndSetDefault("telemetry.dogstatsd_limiter", true) config.BindEnvAndSetDefault("telemetry.python_memory", true) config.BindEnv("telemetry.checks") // We're using []string as a default instead of []float64 because viper can only parse list of string from the environment @@ -1082,14 +1098,36 @@ func InitConfig(config Config) { config.BindEnvAndSetDefault("orchestrator_explorer.manifest_collection.buffer_flush_interval", 20*time.Second) // Container lifecycle configuration + config.BindEnvAndSetDefault("container_lifecycle.enabled", false) bindEnvAndSetLogsConfigKeys(config, "container_lifecycle.") // Container image configuration + config.BindEnvAndSetDefault("container_image.enabled", false) bindEnvAndSetLogsConfigKeys(config, "container_image.") // SBOM configuration + config.BindEnvAndSetDefault("sbom.enabled", false) bindEnvAndSetLogsConfigKeys(config, "sbom.") - setupSBOMConfig(config, "sbom-agent") + + config.BindEnvAndSetDefault("sbom.cache_directory", filepath.Join(defaultRunPath, "sbom-agent")) + config.BindEnvAndSetDefault("sbom.clear_cache_on_exit", false) + config.BindEnvAndSetDefault("sbom.cache.enabled", false) + config.BindEnvAndSetDefault("sbom.cache.max_disk_size", 1000*1000*100) // used by custom cache: max disk space used by cached objects. Not equal to max disk usage + config.BindEnvAndSetDefault("sbom.cache.max_cache_entries", 10000) // used by custom cache keys stored in memory + config.BindEnvAndSetDefault("sbom.cache.clean_interval", "30m") // used by custom cache. 
+ + // Container SBOM configuration + config.BindEnvAndSetDefault("sbom.container_image.enabled", false) + config.BindEnvAndSetDefault("sbom.container_image.use_mount", false) + config.BindEnvAndSetDefault("sbom.container_image.scan_interval", 0) // Integer seconds + config.BindEnvAndSetDefault("sbom.container_image.scan_timeout", 10*60) // Integer seconds + config.BindEnvAndSetDefault("sbom.container_image.analyzers", []string{"os"}) + config.BindEnvAndSetDefault("sbom.container_image.check_disk_usage", true) + config.BindEnvAndSetDefault("sbom.container_image.min_available_disk", "1Gb") + + // Host SBOM configuration + config.BindEnvAndSetDefault("sbom.host.enabled", false) + config.BindEnvAndSetDefault("sbom.host.analyzers", []string{"os"}) // Orchestrator Explorer - process agent // DEPRECATED in favor of `orchestrator_explorer.orchestrator_dd_url` setting. If both are set `orchestrator_explorer.orchestrator_dd_url` will take precedence. @@ -1111,16 +1149,6 @@ func InitConfig(config Config) { config.BindEnvAndSetDefault("inventories_max_interval", DefaultInventoriesMaxInterval) // integer seconds config.BindEnvAndSetDefault("inventories_min_interval", DefaultInventoriesMinInterval) // integer seconds - // container_image_collection - config.BindEnvAndSetDefault("container_image_collection.metadata.enabled", false) - config.BindEnvAndSetDefault("container_image_collection.sbom.enabled", false) - config.BindEnvAndSetDefault("container_image_collection.sbom.use_mount", false) - config.BindEnvAndSetDefault("container_image_collection.sbom.scan_interval", 0) // Integer seconds - config.BindEnvAndSetDefault("container_image_collection.sbom.scan_timeout", 10*60) // Integer seconds - config.BindEnvAndSetDefault("container_image_collection.sbom.analyzers", []string{"os"}) - config.BindEnvAndSetDefault("container_image_collection.sbom.check_disk_usage", true) - config.BindEnvAndSetDefault("container_image_collection.sbom.min_available_disk", "1Gb") - // Datadog security 
agent (common) config.BindEnvAndSetDefault("security_agent.cmd_port", 5010) config.BindEnvAndSetDefault("security_agent.expvar_port", 5011) @@ -1661,17 +1689,6 @@ func setupFipsLogsConfig(config Config, configPrefix string, url string) { config.Set(configPrefix+"logs_dd_url", url) } -func setupSBOMConfig(config Config, cacheDir string) { - config.BindEnvAndSetDefault("sbom.enabled", false) - config.BindEnvAndSetDefault("sbom.analyzers", []string{"os"}) - config.BindEnvAndSetDefault("sbom.cache_directory", filepath.Join(defaultRunPath, cacheDir)) - config.BindEnvAndSetDefault("sbom.clear_cache_on_exit", false) - config.BindEnvAndSetDefault("sbom.use_custom_cache", false) - config.BindEnvAndSetDefault("sbom.custom_cache_max_disk_size", 1000*1000*100) // used by custom cache: max disk space used by cached objects. Not equal to max disk usage - config.BindEnvAndSetDefault("sbom.custom_cache_max_cache_entries", 10000) // used by custom cache keys stored in memory - config.BindEnvAndSetDefault("sbom.cache_clean_interval", "30m") // used by custom cache. -} - // ResolveSecrets merges all the secret values from origin into config. Secret values // are identified by a value of the form "ENC[key]" where key is the secret key. // See: https://github.com/DataDog/datadog-agent/blob/main/docs/agent/secrets.md diff --git a/pkg/config/config_template.yaml b/pkg/config/config_template.yaml index 932e7e3eb0f76..aa3564e879df4 100644 --- a/pkg/config/config_template.yaml +++ b/pkg/config/config_template.yaml @@ -1661,6 +1661,39 @@ api_key: ## Enable internal profiling for the System Probe process. # # enabled: false + + ## @param memory_controller - custom object - optional + ## Cgroup memory controller for internal memory profiling. + ## + ## memory_controller: + # + ## @param enabled - boolean - optional - default: false + ## Enable cgroup memory controller. 
+ # + # enabled: false + # + ## @param thresholds - map of strings - optional + ## Thresholds and the respective active actions to trigger when + ## memory usage is above the specified threshold. + ## Threshold can be either an absolute value - such as 500MB or 2GB - + ## or a percentage of the cgroup allocated memory such as 50%. + ## The action can be: + ## - gc: to trigger the Go garbage collector + ## - profile: to generate a system-probe memory profile in /tmp + ## - log: to simply log that the threshold was reached + # + # thresholds: + # 500MB: gc + # 50%: profile + + ## @param pressure_levels - map of strings - optional + ## Pressure levels and the respective active actions to trigger when + ## memory usage reaches the specified level. + ## The pressure level is 'low', 'medium' or 'critical'. + ## The actions are the same for thresholds (see above). + # + # pressure_levels: + # medium: gc {{ end }} {{- if .NetworkModule }} @@ -1825,12 +1858,9 @@ api_key: {{ end -}} {{- if .SecurityAgent -}} -########################################## -## Security Agent Runtime Configuration ## -## ## -## Settings to sent logs to Datadog are ## -## fetched from section `logs_config` ## -########################################## +#################################### +## Runtime Security configuration ## +#################################### # runtime_security_config: ## @param enabled - boolean - optional - default: false @@ -1842,6 +1872,16 @@ api_key: ## The full path to the location of the unix socket where security runtime module is accessed. # # socket: /opt/datadog-agent/run/runtime-security.sock + +########################################## +## Compliance monitoring configuration ## +########################################## + +# compliance_config: + ## @param enabled - boolean - optional - default: false + ## Set to true to enable Cloud Security Posture Management (CSPM). 
+ # + # enabled: false {{ end -}} {{- if .Dogstatsd }} @@ -1910,6 +1950,18 @@ api_key: # # dogstatsd_stats_enable: false +## @param dogstatsd_logging_enabled - boolean - optional - default: true +## Set to true to write DogstatsD metrics received by the Agent to dogstats_stats log files. +## Requires `dogstatsd_stats_enable: true`. +# +# dogstatsd_logging_enabled: true + +## @param dogstatsd_log_file_max_size - custom - optional - default: 10MB +## Maximum size of dogstatsd log file. Use either a size (for example, 10MB) or +## provide value in bytes (for example, 10485760.) +# +# dogstatsd_log_file_max_size: 10MB + ## @param dogstatsd_queue_size - integer - optional - default: 1024 ## @env DD_DOGSTATSD_QUEUE_SIZE - integer - optional - default: 1024 ## Configure the internal queue size of the Dogstatsd server. diff --git a/pkg/config/log.go b/pkg/config/log.go index c56003c65958d..8a64d65d70116 100644 --- a/pkg/config/log.go +++ b/pkg/config/log.go @@ -30,8 +30,9 @@ type LoggerName string // Constant values for LoggerName. 
const ( - CoreLoggerName LoggerName = "CORE" - JMXLoggerName LoggerName = "JMXFETCH" + CoreLoggerName LoggerName = "CORE" + JMXLoggerName LoggerName = "JMXFETCH" + DogstatsDLoggerName LoggerName = "DOGSTATSD" ) type contextFormat uint8 @@ -45,8 +46,9 @@ const ( var syslogTLSConfig *tls.Config var ( - seelogConfig *seelogCfg.Config - jmxSeelogConfig *seelogCfg.Config + seelogConfig *seelogCfg.Config + jmxSeelogConfig *seelogCfg.Config + dogstatsdSeelogConfig *seelogCfg.Config ) func getLogDateFormat() string { @@ -132,6 +134,41 @@ func SetupJMXLogger(logFile, syslogURI string, syslogRFC, logToConsole, jsonForm return nil } +// SetupDogstatsdLogger sets up a logger with dogstatsd logger name and log level +// if a non empty logFile is provided, it will also log to the file +func SetupDogstatsdLogger(logFile string) (seelog.LoggerInterface, error) { + seelogLogLevel, err := validateLogLevel("info") + if err != nil { + return nil, err + } + + dogstatsdSeelogConfig = buildDogstatsdLoggerConfig(DogstatsDLoggerName, seelogLogLevel, logFile) + + dogstatsdLoggerInterface, err := GenerateLoggerInterface(dogstatsdSeelogConfig) + if err != nil { + return nil, err + } + + return dogstatsdLoggerInterface, nil + +} + +func buildDogstatsdLoggerConfig(loggerName LoggerName, seelogLogLevel, logFile string) *seelogCfg.Config { + config := seelogCfg.NewSeelogConfig(string(loggerName), seelogLogLevel, "common", "", buildCommonFormat(loggerName), false) + + // Configuring max roll for log file, if dogstatsd_log_file_max_rolls env var is not set (or set improperly ) within datadog.yaml then default value is 3 + dogstatsd_log_file_max_rolls := Datadog.GetInt("dogstatsd_log_file_max_rolls") + if dogstatsd_log_file_max_rolls < 0 { + dogstatsd_log_file_max_rolls = 3 + log.Warnf("Invalid value for dogstatsd_log_file_max_rolls, please make sure the value is equal or higher than 0") + } + + // Configure log file, log file max size, log file roll up + config.EnableFileLogging(logFile, 
Datadog.GetSizeInBytes("dogstatsd_log_file_max_size"), uint(dogstatsd_log_file_max_rolls)) + + return config +} + func buildLoggerConfig(loggerName LoggerName, seelogLogLevel, logFile, syslogURI string, syslogRFC, logToConsole, jsonFormat bool) (*seelogCfg.Config, error) { formatID := "common" if jsonFormat { diff --git a/pkg/config/system_probe.go b/pkg/config/system_probe.go index 011e79564e0d3..afecb47de3f23 100644 --- a/pkg/config/system_probe.go +++ b/pkg/config/system_probe.go @@ -9,6 +9,7 @@ import ( "encoding/json" "os" "path" + "path/filepath" "strings" "time" @@ -59,7 +60,15 @@ func InitSystemProbeConfig(cfg Config) { cfg.BindEnvAndSetDefault("ignore_host_etc", false) cfg.BindEnvAndSetDefault("go_core_dump", false) - setupSBOMConfig(cfg, "sbom-sysprobe") + // SBOM configuration + cfg.BindEnvAndSetDefault("sbom.host.enabled", false) + cfg.BindEnvAndSetDefault("sbom.host.analyzers", []string{"os"}) + cfg.BindEnvAndSetDefault("sbom.cache_directory", filepath.Join(defaultRunPath, "sbom-sysprobe")) + cfg.BindEnvAndSetDefault("sbom.clear_cache_on_exit", false) + cfg.BindEnvAndSetDefault("sbom.cache.enabled", false) + cfg.BindEnvAndSetDefault("sbom.cache.max_disk_size", 1000*1000*100) // used by custom cache: max disk space used by cached objects. Not equal to max disk usage + cfg.BindEnvAndSetDefault("sbom.cache.max_cache_entries", 10000) // used by custom cache keys stored in memory + cfg.BindEnvAndSetDefault("sbom.cache.clean_interval", "30m") // used by custom cache. 
// Auto exit configuration cfg.BindEnvAndSetDefault("auto_exit.validation_period", 60) @@ -110,8 +119,13 @@ func InitSystemProbeConfig(cfg Config) { cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.block_profile_rate"), 0) cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.enable_goroutine_stacktraces"), false) + cfg.BindEnvAndSetDefault(join(spNS, "memory_controller.enabled"), false) + cfg.BindEnvAndSetDefault(join(spNS, "memory_controller.hierarchy"), "v1") + cfg.BindEnvAndSetDefault(join(spNS, "memory_controller.pressure_levels"), map[string]string{}) + cfg.BindEnvAndSetDefault(join(spNS, "memory_controller.thresholds"), map[string]string{}) + // ebpf general settings - cfg.BindEnvAndSetDefault(join(spNS, "bpf_debug"), false) + cfg.BindEnvAndSetDefault(join(spNS, "bpf_debug"), false, "DD_SYSTEM_PROBE_CONFIG_BPF_DEBUG", "BPF_DEBUG") cfg.BindEnvAndSetDefault(join(spNS, "bpf_dir"), defaultSystemProbeBPFDir, "DD_SYSTEM_PROBE_BPF_DIR") cfg.BindEnvAndSetDefault(join(spNS, "java_dir"), defaultSystemProbeJavaDir, "DD_SYSTEM_PROBE_JAVA_DIR") cfg.BindEnvAndSetDefault(join(spNS, "excluded_linux_versions"), []string{}) @@ -139,6 +153,12 @@ func InitSystemProbeConfig(cfg Config) { cfg.BindEnvAndSetDefault(join(spNS, "disable_tcp"), false, "DD_DISABLE_TCP_TRACING") cfg.BindEnvAndSetDefault(join(spNS, "disable_udp"), false, "DD_DISABLE_UDP_TRACING") cfg.BindEnvAndSetDefault(join(spNS, "disable_ipv6"), false, "DD_DISABLE_IPV6_TRACING") + + cfg.SetDefault(join(netNS, "collect_tcp_v4"), true) + cfg.SetDefault(join(netNS, "collect_tcp_v6"), true) + cfg.SetDefault(join(netNS, "collect_udp_v4"), true) + cfg.SetDefault(join(netNS, "collect_udp_v6"), true) + cfg.BindEnvAndSetDefault(join(spNS, "offset_guess_threshold"), int64(defaultOffsetThreshold)) cfg.BindEnvAndSetDefault(join(spNS, "max_tracked_connections"), 65536) @@ -169,7 +189,11 @@ func InitSystemProbeConfig(cfg Config) { cfg.BindEnvAndSetDefault(join(spNS, "dest_excludes"), map[string][]string{}) // 
network_config namespace only + + // For backward compatibility cfg.BindEnv(join(netNS, "enable_http_monitoring"), "DD_SYSTEM_PROBE_NETWORK_ENABLE_HTTP_MONITORING") + cfg.BindEnv(join(smNS, "enable_http_monitoring")) + cfg.BindEnv(join(netNS, "enable_https_monitoring"), "DD_SYSTEM_PROBE_NETWORK_ENABLE_HTTPS_MONITORING") cfg.BindEnvAndSetDefault(join(smNS, "enable_go_tls_support"), false) @@ -274,12 +298,14 @@ func InitSystemProbeConfig(cfg Config) { cfg.BindEnvAndSetDefault("runtime_security_config.self_test.enabled", true) cfg.BindEnvAndSetDefault("runtime_security_config.self_test.send_report", true) cfg.BindEnvAndSetDefault("runtime_security_config.remote_configuration.enabled", false) + cfg.BindEnvAndSetDefault("runtime_security_config.direct_send_from_system_probe", false) // CWS - activity dump cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.enabled", true) cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.cleanup_period", "30s") cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.tags_resolution_period", "60s") cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.load_controller_period", "60s") + cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.min_timeout", "10m") cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.max_dump_size", 1750) cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.traced_cgroups_count", 5) cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.traced_event_types", []string{"exec", "open", "dns"}) @@ -307,6 +333,7 @@ func InitSystemProbeConfig(cfg Config) { cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.cache_size", 10) cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.max_count", 400) cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.remote_configuration.enabled", true) + cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.dns_match_max_depth", 0) // CWS - Anomaly 
detection cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.event_types", []string{"exec", "dns"}) @@ -314,7 +341,7 @@ func InitSystemProbeConfig(cfg Config) { cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.workload_warmup_period", "60s") cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.unstable_profile_time_threshold", "48h") cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.unstable_profile_size_threshold", 50000) - cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.rate_limiter", 5) + cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.rate_limiter", "5s") } func join(pieces ...string) string { diff --git a/pkg/ebpf/compiler/compiler_test.go b/pkg/ebpf/compiler/compiler_test.go deleted file mode 100644 index de55df7fca85f..0000000000000 --- a/pkg/ebpf/compiler/compiler_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -//go:build linux_bpf - -package compiler - -import ( - "fmt" - "io" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/DataDog/datadog-agent/pkg/ebpf" - "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" -) - -func TestCompilerMatch(t *testing.T) { - cPath := "../network/ebpf/c/prebuilt/offset-guess.c" - if _, err := os.Stat(cPath); err != nil { - if os.IsNotExist(err) { - t.Skipf("compiler test must be run in source tree") - } else { - t.Fatalf("error checking for offset-guess.c: %s", err) - } - return - } - cfg := ebpf.NewConfig() - - cflags := []string{ - "-I./c", - "-I../network/ebpf/c", - "-includeasm_goto_workaround.h", - } - tmpObjFile, err := os.CreateTemp("", "offset-guess-static-*.o") - require.NoError(t, err) - defer os.Remove(tmpObjFile.Name()) - - onDiskObjFilename := tmpObjFile.Name() - err = CompileToObjectFile(cPath, onDiskObjFilename, cflags, nil) - require.NoError(t, err) - - bs, err := os.ReadFile(onDiskObjFilename) - require.NoError(t, err) - - bundleFilename := "offset-guess.o" - actualReader, err := bytecode.GetReader(cfg.BPFDir, bundleFilename) - require.NoError(t, err) - defer actualReader.Close() - - actual, err := io.ReadAll(actualReader) - require.NoError(t, err) - - assert.Equal(t, bs, actual, fmt.Sprintf("prebuilt file %s and statically-linked clang compiled content %s are different", bundleFilename, onDiskObjFilename)) -} diff --git a/pkg/ebpf/config.go b/pkg/ebpf/config.go index 1deb407942849..98f2025b09ae0 100644 --- a/pkg/ebpf/config.go +++ b/pkg/ebpf/config.go @@ -8,6 +8,7 @@ package ebpf import ( "strings" + sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" aconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/process/util" ) @@ -83,6 +84,10 @@ func key(pieces ...string) string { // NewConfig creates a config with ebpf-related settings func NewConfig() *Config { cfg := aconfig.SystemProbe + if 
!sysconfig.IsAdjusted(cfg) { + sysconfig.Adjust(cfg) + } + return &Config{ BPFDebug: cfg.GetBool(key(spNS, "bpf_debug")), BPFDir: cfg.GetString(key(spNS, "bpf_dir")), diff --git a/pkg/ebpf/debugfs.go b/pkg/ebpf/debugfs.go index c78fd932ef9a2..3f0f8baa9b17b 100644 --- a/pkg/ebpf/debugfs.go +++ b/pkg/ebpf/debugfs.go @@ -41,25 +41,25 @@ func init() { // KprobeStats is the count of hits and misses for a kprobe/kretprobe type KprobeStats struct { - Hits int64 - Misses int64 + Hits uint64 + Misses uint64 } // event name format is p|r___ var eventRegexp = regexp.MustCompile(`^((?:p|r)_.+?)_([^_]*)_([^_]*)$`) // GetProbeStats gathers stats about the # of kprobes triggered /missed by reading the kprobe_profile file -func GetProbeStats() map[string]int64 { +func GetProbeStats() map[string]uint64 { root, err := tracefs.Root() if err != nil { log.Debugf("error getting tracefs root path: %s", err) - return map[string]int64{} + return map[string]uint64{} } return getProbeStats(0, filepath.Join(root, "kprobe_profile")) } -func getProbeStats(pid int, profile string) map[string]int64 { +func getProbeStats(pid int, profile string) map[string]uint64 { if pid == 0 { pid = myPid } @@ -67,10 +67,10 @@ func getProbeStats(pid int, profile string) map[string]int64 { m, err := readKprobeProfile(profile) if err != nil { log.Debugf("error retrieving probe stats: %s", err) - return map[string]int64{} + return map[string]uint64{} } - res := make(map[string]int64, 2*len(m)) + res := make(map[string]uint64, 2*len(m)) for event, st := range m { parts := eventRegexp.FindStringSubmatch(event) if len(parts) > 2 { @@ -136,15 +136,15 @@ func readKprobeProfile(path string) (map[string]KprobeStats, error) { continue } - hits, err := strconv.ParseInt(fields[1], 10, 64) + hits, err := strconv.ParseUint(fields[1], 10, 64) if err != nil { - log.Debugf("error parsing kprobe_profile output for hits (%s): %s", fields[1], err) + log.Debugf("error parsing kprobe_profile output for probe %s hits (%s): %s", 
fields[0], fields[1], err) continue } - misses, err := strconv.ParseInt(fields[2], 10, 64) + misses, err := strconv.ParseUint(fields[2], 10, 64) if err != nil { - log.Debugf("error parsing kprobe_profile output for miss (%s): %s", fields[2], err) + log.Debugf("error parsing kprobe_profile output for probe %s miss (%s): %s", fields[0], fields[2], err) continue } diff --git a/pkg/ebpf/debugfs_test.go b/pkg/ebpf/debugfs_test.go index bb7bcdb4b902f..42090766286e1 100644 --- a/pkg/ebpf/debugfs_test.go +++ b/pkg/ebpf/debugfs_test.go @@ -35,11 +35,11 @@ func TestReadKprobeProfile(t *testing.T) { func TestGetProbeStats(t *testing.T) { stats := getProbeStats(7178, testProfile) - require.Equal(t, int64(1111389857), stats["r_tcp_sendmsg_hits"]) + require.Equal(t, uint64(1111389857), stats["r_tcp_sendmsg_hits"]) stats = getProbeStats(4256, testProfile) - require.Equal(t, int64(549926224), stats["r_tcp_sendmsg_hits"]) - require.Equal(t, int64(549925022), stats["p_tcp_sendmsg_hits"]) + require.Equal(t, uint64(549926224), stats["r_tcp_sendmsg_hits"]) + require.Equal(t, uint64(549925022), stats["p_tcp_sendmsg_hits"]) stats = getProbeStats(0, testProfile) require.Empty(t, stats) diff --git a/pkg/ebpf/ebpftest/buildmode.go b/pkg/ebpf/ebpftest/buildmode.go new file mode 100644 index 0000000000000..47e03d5fa4768 --- /dev/null +++ b/pkg/ebpf/ebpftest/buildmode.go @@ -0,0 +1,94 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package ebpftest + +import ( + "fmt" +) + +// TODO I don't love fentry as a buildmode here... 
+var ( + Prebuilt BuildMode + RuntimeCompiled BuildMode + CORE BuildMode + Fentry BuildMode +) + +func init() { + Prebuilt = prebuilt{} + RuntimeCompiled = runtimeCompiled{} + CORE = core{} + Fentry = fentry{} +} + +type BuildMode interface { + fmt.Stringer + Env() map[string]string +} + +type prebuilt struct{} + +func (p prebuilt) String() string { + return "prebuilt" +} + +func (p prebuilt) Env() map[string]string { + return map[string]string{ + "NETWORK_TRACER_FENTRY_TESTS": "false", + "DD_ENABLE_RUNTIME_COMPILER": "false", + "DD_ENABLE_CO_RE": "false", + "DD_ALLOW_RUNTIME_COMPILED_FALLBACK": "false", + "DD_ALLOW_PRECOMPILED_FALLBACK": "false", + } +} + +type runtimeCompiled struct{} + +func (r runtimeCompiled) String() string { + return "runtime compiled" +} + +func (r runtimeCompiled) Env() map[string]string { + return map[string]string{ + "NETWORK_TRACER_FENTRY_TESTS": "false", + "DD_ENABLE_RUNTIME_COMPILER": "true", + "DD_ENABLE_CO_RE": "false", + "DD_ALLOW_RUNTIME_COMPILED_FALLBACK": "false", + "DD_ALLOW_PRECOMPILED_FALLBACK": "false", + } +} + +type core struct{} + +func (c core) String() string { + return "CO-RE" +} + +func (c core) Env() map[string]string { + return map[string]string{ + "NETWORK_TRACER_FENTRY_TESTS": "false", + "DD_ENABLE_RUNTIME_COMPILER": "false", + "DD_ENABLE_CO_RE": "true", + "DD_ALLOW_RUNTIME_COMPILED_FALLBACK": "false", + "DD_ALLOW_PRECOMPILED_FALLBACK": "false", + } +} + +type fentry struct{} + +func (f fentry) String() string { + return "fentry" +} + +func (f fentry) Env() map[string]string { + return map[string]string{ + "NETWORK_TRACER_FENTRY_TESTS": "true", + "DD_ENABLE_RUNTIME_COMPILER": "false", + "DD_ENABLE_CO_RE": "true", + "DD_ALLOW_RUNTIME_COMPILED_FALLBACK": "false", + "DD_ALLOW_PRECOMPILED_FALLBACK": "false", + } +} diff --git a/pkg/ebpf/ebpftest/buildmode_linux.go b/pkg/ebpf/ebpftest/buildmode_linux.go new file mode 100644 index 0000000000000..cbccaa75020e5 --- /dev/null +++ b/pkg/ebpf/ebpftest/buildmode_linux.go @@ 
-0,0 +1,50 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package ebpftest + +import ( + "runtime" + "testing" + + "github.com/DataDog/gopsutil/host" + + "github.com/DataDog/datadog-agent/pkg/util/kernel" +) + +var hostinfo *host.InfoStat +var kv kernel.Version + +func init() { + kv, _ = kernel.HostVersion() + hostinfo, _ = host.Info() +} + +func SupportedBuildModes() []BuildMode { + modes := []BuildMode{Prebuilt, RuntimeCompiled, CORE} + if runtime.GOARCH == "amd64" && (hostinfo.Platform == "amazon" || hostinfo.Platform == "amzn") && kv.Major() == 5 && kv.Minor() == 10 { + modes = append(modes, Fentry) + } + return modes +} + +func TestBuildModes(t *testing.T, modes []BuildMode, name string, fn func(t *testing.T)) { + for _, mode := range modes { + TestBuildMode(t, mode, name, fn) + } +} + +func TestBuildMode(t *testing.T, mode BuildMode, name string, fn func(t *testing.T)) { + t.Run(mode.String(), func(t *testing.T) { + for k, v := range mode.Env() { + t.Setenv(k, v) + } + if name != "" { + t.Run(name, fn) + } else { + fn(t) + } + }) +} diff --git a/pkg/ebpf/ebpftest/buildmode_test.go b/pkg/ebpf/ebpftest/buildmode_test.go new file mode 100644 index 0000000000000..654ddeb92fb79 --- /dev/null +++ b/pkg/ebpf/ebpftest/buildmode_test.go @@ -0,0 +1,52 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux + +package ebpftest + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/pkg/ebpf" +) + +func TestBuildModeConstants(t *testing.T) { + TestBuildMode(t, Prebuilt, "", func(t *testing.T) { + cfg := ebpf.NewConfig() + assert.False(t, cfg.EnableRuntimeCompiler) + assert.False(t, cfg.EnableCORE) + assert.False(t, cfg.AllowPrecompiledFallback) + assert.False(t, cfg.AllowRuntimeCompiledFallback) + assert.Equal(t, "false", os.Getenv("NETWORK_TRACER_FENTRY_TESTS")) + }) + TestBuildMode(t, RuntimeCompiled, "", func(t *testing.T) { + cfg := ebpf.NewConfig() + assert.True(t, cfg.EnableRuntimeCompiler) + assert.False(t, cfg.EnableCORE) + assert.False(t, cfg.AllowPrecompiledFallback) + assert.False(t, cfg.AllowRuntimeCompiledFallback) + assert.Equal(t, "false", os.Getenv("NETWORK_TRACER_FENTRY_TESTS")) + }) + TestBuildMode(t, CORE, "", func(t *testing.T) { + cfg := ebpf.NewConfig() + assert.False(t, cfg.EnableRuntimeCompiler) + assert.True(t, cfg.EnableCORE) + assert.False(t, cfg.AllowPrecompiledFallback) + assert.False(t, cfg.AllowRuntimeCompiledFallback) + assert.Equal(t, "false", os.Getenv("NETWORK_TRACER_FENTRY_TESTS")) + }) + TestBuildMode(t, Fentry, "", func(t *testing.T) { + cfg := ebpf.NewConfig() + assert.False(t, cfg.EnableRuntimeCompiler) + assert.True(t, cfg.EnableCORE) + assert.False(t, cfg.AllowPrecompiledFallback) + assert.False(t, cfg.AllowRuntimeCompiledFallback) + assert.Equal(t, "true", os.Getenv("NETWORK_TRACER_FENTRY_TESTS")) + }) +} diff --git a/pkg/ebpf/ebpftest/buildmode_windows.go b/pkg/ebpf/ebpftest/buildmode_windows.go new file mode 100644 index 0000000000000..39e423ac673fc --- /dev/null +++ b/pkg/ebpf/ebpftest/buildmode_windows.go @@ -0,0 +1,35 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). 
+// Copyright 2016-present Datadog, Inc. + +package ebpftest + +import "testing" + +func SupportedBuildModes() []BuildMode { + return []BuildMode{Prebuilt} +} + +func TestBuildModes(t *testing.T, modes []BuildMode, name string, fn func(t *testing.T)) { + // ignore provided modes and only use prebuilt + TestBuildMode(t, Prebuilt, name, fn) +} + +func TestBuildMode(t *testing.T, mode BuildMode, name string, fn func(t *testing.T)) { + if mode != Prebuilt { + t.Skipf("unsupported build mode %s", mode) + return + } + + t.Run(mode.String(), func(t *testing.T) { + for k, v := range mode.Env() { + t.Setenv(k, v) + } + if name != "" { + t.Run(name, fn) + } else { + fn(t) + } + }) +} diff --git a/pkg/epforwarder/epforwarder.go b/pkg/epforwarder/epforwarder.go index 046105c3e8b28..4d4c7b628bbb4 100644 --- a/pkg/epforwarder/epforwarder.go +++ b/pkg/epforwarder/epforwarder.go @@ -29,6 +29,7 @@ const ( eventTypeDBMSamples = "dbm-samples" eventTypeDBMMetrics = "dbm-metrics" eventTypeDBMActivity = "dbm-activity" + eventTypeDBMMetadata = "dbm-metadata" // EventTypeNetworkDevicesMetadata is the event type for network devices metadata EventTypeNetworkDevicesMetadata = "network-devices-metadata" @@ -69,6 +70,22 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, defaultInputChanSize: pkgconfig.DefaultInputChanSize, }, + { + eventType: eventTypeDBMMetadata, + contentType: http.JSONContentType, + // set the endpoint config to "metrics" since metadata will hit the same endpoint + // as metrics, so there is no need to add an extra config endpoint. 
+ // As a follow-on PR, we should clean this up to have a single config for each track type since + // all of our data now flows through the same intake + endpointsConfigPrefix: "database_monitoring.metrics.", + hostnameEndpointPrefix: "dbm-metrics-intake.", + intakeTrackType: "dbmmetadata", + // raise the default batch_max_concurrent_send from 0 to 10 to ensure this pipeline is able to handle 4k events/s + defaultBatchMaxConcurrentSend: 10, + defaultBatchMaxContentSize: 20e6, + defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, + defaultInputChanSize: pkgconfig.DefaultInputChanSize, + }, { eventType: eventTypeDBMActivity, contentType: http.JSONContentType, diff --git a/pkg/languagedetection/detector.go b/pkg/languagedetection/detector.go new file mode 100644 index 0000000000000..5f9c4cd85e1cb --- /dev/null +++ b/pkg/languagedetection/detector.go @@ -0,0 +1,90 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package languagedetection + +import ( + "strings" + + "github.com/DataDog/datadog-agent/pkg/process/procutil" +) + +// LanguageName is a string enum that represents a detected language name. 
+type LanguageName string + +const ( + Node LanguageName = "node" + Dotnet LanguageName = "dotnet" + Python LanguageName = "python" + Java LanguageName = "java" + Unknown LanguageName = "" +) + +// Language contains metadata collected from the call to `DetectLanguage` +type Language struct { + Name LanguageName +} + +type languageFromCLI struct { + name LanguageName + validator func(exe string) bool +} + +// knownPrefixes maps languages names to their prefix +var knownPrefixes = map[string]languageFromCLI{ + "python": {name: Python}, + "java": {name: Java, validator: func(exe string) bool { + if exe == "javac" { + return false + } + return true + }}, +} + +// exactMatches maps an exact exe name match to a prefix +var exactMatches = map[string]languageFromCLI{ + "py": {name: Python}, + "python": {name: Python}, + + "java": {name: Java}, + + "npm": {name: Node}, + "node": {name: Node}, + + "dotnet": {name: Dotnet}, +} + +func languageNameFromCommandLine(cmdline []string) LanguageName { + exe := getExe(cmdline) + + // First check to see if there is an exact match + if lang, ok := exactMatches[exe]; ok { + return lang.name + } + + for prefix, language := range knownPrefixes { + if strings.HasPrefix(exe, prefix) { + if language.validator != nil { + isValidResult := language.validator(exe) + if !isValidResult { + continue + } + } + return language.name + } + } + + return Unknown +} + +// DetectLanguage uses a combination of commandline parsing and binary analysis to detect a process' language +func DetectLanguage(procs []*procutil.Process) []*Language { + langs := make([]*Language, len(procs)) + for i, proc := range procs { + languageName := languageNameFromCommandLine(proc.Cmdline) + langs[i] = &Language{Name: languageName} + } + return langs +} diff --git a/pkg/languagedetection/detector_nix_test.go b/pkg/languagedetection/detector_nix_test.go new file mode 100644 index 0000000000000..3d81da7f1cf3b --- /dev/null +++ b/pkg/languagedetection/detector_nix_test.go @@ -0,0 
+1,170 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !windows + +package languagedetection + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/pkg/process/procutil" +) + +func makeProcess(cmdline []string) *procutil.Process { + return &procutil.Process{ + Pid: rand.Int31(), + Cmdline: cmdline, + } +} + +func TestLanguageFromCommandline(t *testing.T) { + for _, tc := range []struct { + name string + cmdline []string + expected LanguageName + }{ + { + name: "python2", + cmdline: []string{"/opt/Python/2.7.11/bin/python2.7", "/opt/foo/bar/baz", "--config=asdf"}, + expected: Python, + }, + { + name: "Java", + cmdline: []string{"/usr/bin/Java", "-Xfoo=true", "org.elasticsearch.bootstrap.Elasticsearch"}, + expected: Java, + }, + { + name: "Unknown", + cmdline: []string{"mine-bitcoins", "--all"}, + expected: Unknown, + }, + { + name: "Python with space and special chars in path", + cmdline: []string{"//..//path/\"\\ to/Python", "asdf"}, + expected: Python, + }, + { + name: "args in first element", + cmdline: []string{"/usr/bin/Python myapp.py --config=/etc/mycfg.yaml"}, + expected: Python, + }, + { + name: "javac is not Java", + cmdline: []string{"javac", "main.Java"}, + expected: Unknown, + }, + { + name: "py is Python", + cmdline: []string{"py", "test.py"}, + expected: Python, + }, + { + name: "py is not a prefix", + cmdline: []string{"pyret", "main.pyret"}, + expected: Unknown, + }, + { + name: "node", + cmdline: []string{"node", "/etc/app/index.js"}, + expected: Node, + }, + { + name: "npm", + cmdline: []string{"npm", "start"}, + expected: Node, + }, + { + name: "dotnet", + cmdline: []string{"dotnet", "myApp"}, + expected: Dotnet, + }, + } { + t.Run(tc.name, func(t *testing.T) { + 
assert.Equal(t, tc.expected, languageNameFromCommandLine(tc.cmdline)) + }) + } +} + +func TestGetExe(t *testing.T) { + type test struct { + name string + cmdline []string + expected string + } + + for _, tc := range []test{ + { + name: "blank", + cmdline: []string{}, + expected: "", + }, + { + name: "python", + cmdline: []string{"/usr/bin/python", "test.py"}, + expected: "python", + }, + { + name: "numeric ending", + cmdline: []string{"/usr/bin/python3.9", "test.py"}, + expected: "python3.9", + }, + { + name: "packed args", + cmdline: []string{"java -jar Test.jar"}, + expected: "java", + }, + { + name: "uppercase", + cmdline: []string{"/usr/bin/MyBinary"}, + expected: "mybinary", + }, + { + name: "dont trim .exe on linux", + cmdline: []string{"/usr/bin/helloWorld.exe"}, + expected: "helloworld.exe", + }, + } { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expected, getExe(tc.cmdline)) + }) + } +} + +func BenchmarkDetectLanguage(b *testing.B) { + commands := [][]string{ + {"Python", "--version"}, + {"python3", "--version"}, + {"py", "--version"}, + {"Python", "-c", "import platform; print(platform.python_version())"}, + {"python3", "-c", "import platform; print(platform.python_version())"}, + {"py", "-c", "import platform; print(platform.python_version())"}, + {"Python", "-c", "import sys; print(sys.version)"}, + {"python3", "-c", "import sys; print(sys.version)"}, + {"py", "-c", "import sys; print(sys.version)"}, + {"Python", "-c", "print('Python')"}, + {"python3", "-c", "print('Python')"}, + {"py", "-c", "print('Python')"}, + {"Java", "-version"}, + {"Java", "-jar", "myapp.jar"}, + {"Java", "-cp", ".", "MyClass"}, + {"javac", "MyClass.Java"}, + {"javap", "-c", "MyClass"}, + } + + var procs []*procutil.Process + for _, command := range commands { + procs = append(procs, makeProcess(command)) + } + + b.StartTimer() + + for i := 0; i < b.N; i++ { + DetectLanguage(procs) + } +} diff --git a/pkg/languagedetection/detector_windows_test.go 
b/pkg/languagedetection/detector_windows_test.go new file mode 100644 index 0000000000000..76b5a2b2bf7a3 --- /dev/null +++ b/pkg/languagedetection/detector_windows_test.go @@ -0,0 +1,72 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build windows + +package languagedetection + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLanguageFromCommandline(t *testing.T) { + for _, tc := range []struct { + name string + cmdline []string + expected LanguageName + error bool + }{ + { + name: "Python", + cmdline: []string{"C:\\Program Files\\Python3.9\\Python.exe", "test.py"}, + expected: Python, + }, + { + name: "Java", + cmdline: []string{"C:\\Program Files\\Java\\Java.exe", "main.Java"}, + expected: Java, + }, + { + name: "ignore javac", + cmdline: []string{"C:\\Program Files\\Java\\javac.exe", "main.Java"}, + expected: Unknown, + }, + { + name: "dotnet", + cmdline: []string{"dotnet", "BankApp.dll"}, + expected: Dotnet, + }, + } { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expected, languageNameFromCommandLine(tc.cmdline)) + }) + } +} + +func TestGetExe(t *testing.T) { + type test struct { + name string + cmdline []string + expected string + } + for _, tc := range []test{ + { + name: "windows", + cmdline: []string{"C:\\Program Files\\Python\\python.exe", "test.py"}, + expected: "python", + }, + { + name: "quotes", + cmdline: []string{"\"C:\\Program Files\\Python\\python.exe\"", "test.py"}, + expected: "python", + }, + } { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expected, getExe(tc.cmdline)) + }) + } +} diff --git a/pkg/languagedetection/parsing.go b/pkg/languagedetection/parsing.go new file mode 100644 index 0000000000000..bb83553a342c4 --- /dev/null +++ b/pkg/languagedetection/parsing.go @@ -0,0 
+1,73 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package languagedetection + +import ( + "path/filepath" + "runtime" + "strings" + "unicode" +) + +func isRuneAlphanumeric(s string, position int) bool { + return len(s) > position && (unicode.IsLetter(rune(s[position])) || unicode.IsNumber(rune(s[position]))) +} + +// parseExeStartWithSymbol deals with exe that starts with special chars like "(", "-" or "[" +func parseExeStartWithSymbol(exe string) string { + if exe == "" { + return exe + } + // drop the first character + result := exe[1:] + // if last character is also special character, also drop it + if result != "" && !isRuneAlphanumeric(result, len(result)-1) { + result = result[:len(result)-1] + } + return result +} + +func getExe(cmd []string) string { + if len(cmd) == 0 { + return "" + } + + exe := cmd[0] + // check if all args are packed into the first argument + if len(cmd) == 1 { + if idx := strings.IndexRune(exe, ' '); idx != -1 { + exe = exe[0:idx] + } + } + + // trim any quotes from the executable + exe = strings.Trim(exe, "\"") + + // Extract executable from commandline args + exe = removeFilePath(exe) + if !isRuneAlphanumeric(exe, 0) { + exe = parseExeStartWithSymbol(exe) + } + + // For windows executables, trim the .exe suffix if there is one + if runtime.GOOS == "windows" { + exe = strings.TrimSuffix(exe, ".exe") + } + + // Lowercase the exe so that we don't need to worry about case sensitivity + exe = strings.ToLower(exe) + + return exe +} + +// removeFilePath removes the base path from the string +// Note that its behavior is OS dependent +func removeFilePath(s string) string { + if s != "" { + return filepath.Base(s) + } + return s +} diff --git a/pkg/logs/internal/launchers/container/tailerfactory/file.go 
b/pkg/logs/internal/launchers/container/tailerfactory/file.go index d09b1a584bd2d..1d996f08cb3bb 100644 --- a/pkg/logs/internal/launchers/container/tailerfactory/file.go +++ b/pkg/logs/internal/launchers/container/tailerfactory/file.go @@ -120,13 +120,16 @@ func (tf *factory) makeDockerFileSource(source *sources.LogSource) (*sources.Log // New file source that inherits most of its parent's properties fileSource := sources.NewLogSource(source.Name, &config.LogsConfig{ - Type: config.FileType, - Identifier: containerID, - Path: path, - Service: serviceName, - Source: sourceName, - Tags: source.Config.Tags, - ProcessingRules: source.Config.ProcessingRules, + Type: config.FileType, + Identifier: containerID, + Path: path, + Service: serviceName, + Source: sourceName, + Tags: source.Config.Tags, + ProcessingRules: source.Config.ProcessingRules, + AutoMultiLine: source.Config.AutoMultiLine, + AutoMultiLineSampleSize: source.Config.AutoMultiLineSampleSize, + AutoMultiLineMatchThreshold: source.Config.AutoMultiLineMatchThreshold, }) // inform the file launcher that it should expect docker-formatted content @@ -203,13 +206,16 @@ func (tf *factory) makeK8sFileSource(source *sources.LogSource) (*sources.LogSou fileSource := sources.NewLogSource( fmt.Sprintf("%s/%s/%s", pod.Namespace, pod.Name, container.Name), &config.LogsConfig{ - Type: config.FileType, - Identifier: containerID, - Path: path, - Service: serviceName, - Source: sourceName, - Tags: source.Config.Tags, - ProcessingRules: source.Config.ProcessingRules, + Type: config.FileType, + Identifier: containerID, + Path: path, + Service: serviceName, + Source: sourceName, + Tags: source.Config.Tags, + ProcessingRules: source.Config.ProcessingRules, + AutoMultiLine: source.Config.AutoMultiLine, + AutoMultiLineSampleSize: source.Config.AutoMultiLineSampleSize, + AutoMultiLineMatchThreshold: source.Config.AutoMultiLineMatchThreshold, }) switch source.Config.Type { diff --git 
a/pkg/logs/internal/launchers/container/tailerfactory/file_test.go b/pkg/logs/internal/launchers/container/tailerfactory/file_test.go index 780ee10aca9c1..81d5c9e296765 100644 --- a/pkg/logs/internal/launchers/container/tailerfactory/file_test.go +++ b/pkg/logs/internal/launchers/container/tailerfactory/file_test.go @@ -22,6 +22,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/pipeline" "github.com/DataDog/datadog-agent/pkg/logs/sources" dockerutilPkg "github.com/DataDog/datadog-agent/pkg/util/docker" + "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/DataDog/datadog-agent/pkg/workloadmeta" ) @@ -85,11 +86,14 @@ func TestMakeFileSource_docker_success(t *testing.T) { cop: containersorpods.NewDecidedChooser(containersorpods.LogContainers), } source := sources.NewLogSource("test", &config.LogsConfig{ - Type: "docker", - Identifier: "abc", - Source: "src", - Service: "svc", - Tags: []string{"tag!"}, + Type: "docker", + Identifier: "abc", + Source: "src", + Service: "svc", + Tags: []string{"tag!"}, + AutoMultiLine: pointer.Ptr(true), + AutoMultiLineSampleSize: 123, + AutoMultiLineMatchThreshold: 0.123, }) child, err := tf.makeFileSource(source) require.NoError(t, err) @@ -101,6 +105,9 @@ func TestMakeFileSource_docker_success(t *testing.T) { require.Equal(t, source.Config.Service, child.Config.Service) require.Equal(t, source.Config.Tags, child.Config.Tags) require.Equal(t, sources.DockerSourceType, child.GetSourceType()) + require.Equal(t, *source.Config.AutoMultiLine, true) + require.Equal(t, source.Config.AutoMultiLineSampleSize, 123) + require.Equal(t, source.Config.AutoMultiLineMatchThreshold, 0.123) } func TestMakeFileSource_docker_no_file(t *testing.T) { @@ -179,11 +186,14 @@ func TestMakeK8sSource(t *testing.T) { for _, sourceConfigType := range []string{"docker", "containerd"} { t.Run("source.Config.Type="+sourceConfigType, func(t *testing.T) { source := sources.NewLogSource("test", &config.LogsConfig{ - Type: sourceConfigType, - 
Identifier: "abc", - Source: "src", - Service: "svc", - Tags: []string{"tag!"}, + Type: sourceConfigType, + Identifier: "abc", + Source: "src", + Service: "svc", + Tags: []string{"tag!"}, + AutoMultiLine: pointer.Ptr(true), + AutoMultiLineSampleSize: 123, + AutoMultiLineMatchThreshold: 0.123, }) child, err := tf.makeK8sFileSource(source) require.NoError(t, err) @@ -194,6 +204,9 @@ func TestMakeK8sSource(t *testing.T) { require.Equal(t, "src", child.Config.Source) require.Equal(t, "svc", child.Config.Service) require.Equal(t, []string{"tag!"}, child.Config.Tags) + require.Equal(t, *child.Config.AutoMultiLine, true) + require.Equal(t, child.Config.AutoMultiLineSampleSize, 123) + require.Equal(t, child.Config.AutoMultiLineMatchThreshold, 0.123) switch sourceConfigType { case "docker": require.Equal(t, sources.DockerSourceType, child.GetSourceType()) diff --git a/pkg/logs/internal/launchers/file/provider/file_provider.go b/pkg/logs/internal/launchers/file/provider/file_provider.go index 062c3918dd4d3..6b7074c073c23 100644 --- a/pkg/logs/internal/launchers/file/provider/file_provider.go +++ b/pkg/logs/internal/launchers/file/provider/file_provider.go @@ -74,10 +74,11 @@ const ( // FileProvider implements the logic to retrieve at most filesLimit Files defined in sources type FileProvider struct { - filesLimit int - wildcardOrder wildcardOrdering - selectionMode selectionStrategy - shouldLogErrors bool + filesLimit int + wildcardOrder wildcardOrdering + selectionMode selectionStrategy + shouldLogErrors bool + reachedNumFileLimit bool } // NewFileProvider returns a new Provider @@ -90,10 +91,11 @@ func NewFileProvider(filesLimit int, wildcardSelection WildcardSelectionStrategy } return &FileProvider{ - filesLimit: filesLimit, - wildcardOrder: wildcardOrder, - selectionMode: selectionMode, - shouldLogErrors: true, + filesLimit: filesLimit, + wildcardOrder: wildcardOrder, + selectionMode: selectionMode, + shouldLogErrors: true, + reachedNumFileLimit: false, } } @@ -225,8 
+227,11 @@ func (p *FileProvider) FilesToTail(validatePodContainerID bool, inputSources []* source.Messages.AddMessage(source.Config.Path, fmt.Sprintf("%d files tailed out of %d files matching", matchCnt.tracked, matchCnt.total)) } - if len(filesToTail) == p.filesLimit { + if !p.reachedNumFileLimit && len(filesToTail) == p.filesLimit { log.Warn("Reached the limit on the maximum number of files in use: ", p.filesLimit) + p.reachedNumFileLimit = true + } else if len(filesToTail) < p.filesLimit { + p.reachedNumFileLimit = false } return filesToTail diff --git a/pkg/metadata/host/host_windows.go b/pkg/metadata/host/host_windows.go index 8adbbd7437719..c0e671acd2a4d 100644 --- a/pkg/metadata/host/host_windows.go +++ b/pkg/metadata/host/host_windows.go @@ -17,8 +17,8 @@ import ( "strings" "time" - "github.com/DataDog/gohai/cpu" - "github.com/DataDog/gohai/platform" + "github.com/DataDog/datadog-agent/pkg/gohai/cpu" + "github.com/DataDog/datadog-agent/pkg/gohai/platform" "github.com/shirou/w32" "golang.org/x/sys/windows" diff --git a/pkg/metadata/internal/gohai/gohai.go b/pkg/metadata/internal/gohai/gohai.go index 14c1ebb7c305d..f283d576c975c 100644 --- a/pkg/metadata/internal/gohai/gohai.go +++ b/pkg/metadata/internal/gohai/gohai.go @@ -9,11 +9,11 @@ import ( "net" "sync" - "github.com/DataDog/gohai/cpu" - "github.com/DataDog/gohai/filesystem" - "github.com/DataDog/gohai/memory" - "github.com/DataDog/gohai/network" - "github.com/DataDog/gohai/platform" + "github.com/DataDog/datadog-agent/pkg/gohai/cpu" + "github.com/DataDog/datadog-agent/pkg/gohai/filesystem" + "github.com/DataDog/datadog-agent/pkg/gohai/memory" + "github.com/DataDog/datadog-agent/pkg/gohai/network" + "github.com/DataDog/datadog-agent/pkg/gohai/platform" "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/log" diff --git a/pkg/metadata/internal/resources/resources.go b/pkg/metadata/internal/resources/resources.go index d77fea9a3e473..16e9f3e477ad2 100644 --- 
a/pkg/metadata/internal/resources/resources.go +++ b/pkg/metadata/internal/resources/resources.go @@ -8,7 +8,7 @@ package resources import ( - "github.com/DataDog/gohai/processes" + "github.com/DataDog/datadog-agent/pkg/gohai/processes" "github.com/DataDog/datadog-agent/pkg/util/log" ) diff --git a/pkg/metadata/inventories/host_metadata.go b/pkg/metadata/inventories/host_metadata.go index 1adef793032d6..52d01b51bb683 100644 --- a/pkg/metadata/inventories/host_metadata.go +++ b/pkg/metadata/inventories/host_metadata.go @@ -6,10 +6,10 @@ package inventories import ( - "github.com/DataDog/gohai/cpu" - "github.com/DataDog/gohai/memory" - "github.com/DataDog/gohai/network" - "github.com/DataDog/gohai/platform" + "github.com/DataDog/datadog-agent/pkg/gohai/cpu" + "github.com/DataDog/datadog-agent/pkg/gohai/memory" + "github.com/DataDog/datadog-agent/pkg/gohai/network" + "github.com/DataDog/datadog-agent/pkg/gohai/platform" "github.com/DataDog/datadog-agent/pkg/util/dmi" "github.com/DataDog/datadog-agent/pkg/version" diff --git a/pkg/metadata/inventories/host_metadata_test.go b/pkg/metadata/inventories/host_metadata_test.go index f3bf7db016350..dbb6d76c20adc 100644 --- a/pkg/metadata/inventories/host_metadata_test.go +++ b/pkg/metadata/inventories/host_metadata_test.go @@ -9,10 +9,10 @@ import ( "fmt" "testing" - "github.com/DataDog/gohai/cpu" - "github.com/DataDog/gohai/memory" - "github.com/DataDog/gohai/network" - "github.com/DataDog/gohai/platform" + "github.com/DataDog/datadog-agent/pkg/gohai/cpu" + "github.com/DataDog/datadog-agent/pkg/gohai/memory" + "github.com/DataDog/datadog-agent/pkg/gohai/network" + "github.com/DataDog/datadog-agent/pkg/gohai/platform" "github.com/stretchr/testify/assert" "github.com/DataDog/datadog-agent/pkg/util/dmi" diff --git a/pkg/metadata/inventories/inventories.go b/pkg/metadata/inventories/inventories.go index f4bbc5828adcc..f68fbe769fe90 100644 --- a/pkg/metadata/inventories/inventories.go +++ b/pkg/metadata/inventories/inventories.go 
@@ -426,7 +426,7 @@ func initializeConfig(cfg config.Config) { SetAgentMetadata(AgentProcessEnabled, config.Datadog.GetBool("process_config.process_collection.enabled")) SetAgentMetadata(AgentProcessesContainerEnabled, config.Datadog.GetBool("process_config.container_collection.enabled")) SetAgentMetadata(AgentNetworksEnabled, config.SystemProbe.GetBool("network_config.enabled")) - SetAgentMetadata(AgentNetworksHTTPEnabled, config.SystemProbe.GetBool("network_config.enable_http_monitoring")) + SetAgentMetadata(AgentNetworksHTTPEnabled, config.SystemProbe.GetBool("service_monitoring_config.enable_http_monitoring")) SetAgentMetadata(AgentNetworksHTTPSEnabled, config.SystemProbe.GetBool("network_config.enable_https_monitoring")) SetAgentMetadata(AgentUSMKafkaEnabled, config.Datadog.GetBool("data_streams_config.enabled")) SetAgentMetadata(AgentRemoteConfigEnabled, config.Datadog.GetBool("remote_configuration.enabled")) diff --git a/pkg/netflow/common/flow.go b/pkg/netflow/common/flow.go index 6ed2c25634675..aadfbc46ffb7b 100644 --- a/pkg/netflow/common/flow.go +++ b/pkg/netflow/common/flow.go @@ -16,6 +16,7 @@ import ( type Flow struct { Namespace string FlowType FlowType + SequenceNum uint32 SamplingRate uint64 Direction uint32 diff --git a/pkg/netflow/common/utils.go b/pkg/netflow/common/utils.go index a4082c715847b..9fdcfa7910714 100644 --- a/pkg/netflow/common/utils.go +++ b/pkg/netflow/common/utils.go @@ -25,6 +25,14 @@ func MaxUint64(a uint64, b uint64) uint64 { return b } +// MaxUint32 returns the max of the two passed number +func MaxUint32(a uint32, b uint32) uint32 { + if a > b { + return a + } + return b +} + // MaxUint16 returns the max of the two passed number func MaxUint16(a uint16, b uint16) uint16 { if a > b { diff --git a/pkg/netflow/common/utils_test.go b/pkg/netflow/common/utils_test.go index 51ad9537ddcab..6788dd833666e 100644 --- a/pkg/netflow/common/utils_test.go +++ b/pkg/netflow/common/utils_test.go @@ -26,6 +26,11 @@ func TestMaxUint16(t 
*testing.T) { assert.Equal(t, uint16(10), MaxUint16(uint16(5), uint16(10))) } +func TestMaxUint32(t *testing.T) { + assert.Equal(t, uint32(10), MaxUint32(uint32(10), uint32(5))) + assert.Equal(t, uint32(10), MaxUint32(uint32(5), uint32(10))) +} + func TestIPBytesToString(t *testing.T) { assert.Equal(t, "0.0.0.0", IPBytesToString([]byte{0, 0, 0, 0})) assert.Equal(t, "1.2.3.4", IPBytesToString([]byte{1, 2, 3, 4})) diff --git a/pkg/netflow/flowaggregator/aggregator.go b/pkg/netflow/flowaggregator/aggregator.go index ffe28da2b3f1b..8f9f8a4ab6d28 100644 --- a/pkg/netflow/flowaggregator/aggregator.go +++ b/pkg/netflow/flowaggregator/aggregator.go @@ -7,7 +7,9 @@ package flowaggregator import ( "encoding/json" + "net" "strings" + "sync" "time" "github.com/prometheus/client_golang/prometheus" @@ -44,7 +46,30 @@ type FlowAggregator struct { flushedFlowCount *atomic.Uint64 hostname string goflowPrometheusGatherer prometheus.Gatherer - timeNowFunction func() time.Time // Allows to mock time in tests + TimeNowFunction func() time.Time // Allows to mock time in tests + + lastSequencePerExporter map[SequenceDeltaKey]uint32 + lastSequencePerExporterMu sync.Mutex +} + +type SequenceDeltaKey struct { + Namespace string + ExporterIP string + FlowType common.FlowType +} + +type SequenceDeltaValue struct { + Delta int64 + LastSequence uint32 + Reset bool +} + +// maxNegativeSequenceDiffToReset are thresholds used to detect sequence reset +var maxNegativeSequenceDiffToReset = map[common.FlowType]int{ + common.TypeSFlow5: -1000, + common.TypeNetFlow5: -1000, + common.TypeNetFlow9: -100, + common.TypeIPFIX: -100, } // NewFlowAggregator returns a new FlowAggregator @@ -66,7 +91,8 @@ func NewFlowAggregator(sender aggregator.Sender, epForwarder epforwarder.EventPl flushedFlowCount: atomic.NewUint64(0), hostname: hostname, goflowPrometheusGatherer: prometheus.DefaultGatherer, - timeNowFunction: time.Now, + TimeNowFunction: time.Now, + lastSequencePerExporter: 
make(map[SequenceDeltaKey]uint32), } } @@ -103,9 +129,9 @@ func (agg *FlowAggregator) run() { } } -func (agg *FlowAggregator) sendFlows(flows []*common.Flow) { +func (agg *FlowAggregator) sendFlows(flows []*common.Flow, flushTime time.Time) { for _, flow := range flows { - flowPayload := buildPayload(flow, agg.hostname) + flowPayload := buildPayload(flow, agg.hostname, flushTime) payloadBytes, err := json.Marshal(flowPayload) if err != nil { log.Errorf("Error marshalling device metadata: %s", err) @@ -216,13 +242,23 @@ func (agg *FlowAggregator) flushLoop() { // Flush flushes the aggregator func (agg *FlowAggregator) flush() int { flowsContexts := agg.flowAcc.getFlowContextCount() - flushTime := agg.timeNowFunction() + flushTime := agg.TimeNowFunction() flowsToFlush := agg.flowAcc.flush() log.Debugf("Flushing %d flows to the forwarder (flush_duration=%d, flow_contexts_before_flush=%d)", len(flowsToFlush), time.Since(flushTime).Milliseconds(), flowsContexts) + sequenceDeltaPerExporter := agg.getSequenceDelta(flowsToFlush) + for key, seqDelta := range sequenceDeltaPerExporter { + tags := []string{"device_namespace:" + key.Namespace, "exporter_ip:" + key.ExporterIP, "flow_type:" + string(key.FlowType)} + agg.sender.Count("datadog.netflow.aggregator.sequence.delta", float64(seqDelta.Delta), "", tags) + agg.sender.Gauge("datadog.netflow.aggregator.sequence.last", float64(seqDelta.LastSequence), "", tags) + if seqDelta.Reset { + agg.sender.Count("datadog.netflow.aggregator.sequence.reset", float64(1), "", tags) + } + } + // TODO: Add flush stats to agent telemetry e.g. 
aggregator newFlushCountStats() if len(flowsToFlush) > 0 { - agg.sendFlows(flowsToFlush) + agg.sendFlows(flowsToFlush, flushTime) } agg.sendExporterMetadata(flowsToFlush, flushTime) @@ -248,6 +284,50 @@ func (agg *FlowAggregator) flush() int { return len(flowsToFlush) } +// getSequenceDelta return the delta of current sequence number compared to previously saved sequence number +// Since we track per exporterIP, the returned delta is only accurate when for the specific exporterIP there is +// only one NetFlow9/IPFIX observation domain, NetFlow5 engineType/engineId, sFlow agent/subagent. +func (agg *FlowAggregator) getSequenceDelta(flowsToFlush []*common.Flow) map[SequenceDeltaKey]SequenceDeltaValue { + maxSequencePerExporter := make(map[SequenceDeltaKey]uint32) + for _, flow := range flowsToFlush { + key := SequenceDeltaKey{ + Namespace: flow.Namespace, + ExporterIP: net.IP(flow.ExporterAddr).String(), + FlowType: flow.FlowType, + } + if flow.SequenceNum > maxSequencePerExporter[key] { + maxSequencePerExporter[key] = flow.SequenceNum + } + } + sequenceDeltaPerExporter := make(map[SequenceDeltaKey]SequenceDeltaValue) + + agg.lastSequencePerExporterMu.Lock() + defer agg.lastSequencePerExporterMu.Unlock() + for key, seqnum := range maxSequencePerExporter { + lastSeq, prevExist := agg.lastSequencePerExporter[key] + delta := int64(0) + if prevExist { + delta = int64(seqnum) - int64(lastSeq) + } + maxNegSeqDiff := maxNegativeSequenceDiffToReset[key.FlowType] + reset := delta < int64(maxNegSeqDiff) + log.Debugf("[getSequenceDelta] key=%s, seqnum=%d, delta=%d, last=%d, reset=%t", key, seqnum, delta, agg.lastSequencePerExporter[key], reset) + seqDeltaValue := SequenceDeltaValue{LastSequence: seqnum} + if reset { // sequence reset + seqDeltaValue.Delta = int64(seqnum) + seqDeltaValue.Reset = reset + agg.lastSequencePerExporter[key] = seqnum + } else if delta < 0 { + seqDeltaValue.Delta = 0 + } else { + seqDeltaValue.Delta = delta + agg.lastSequencePerExporter[key] = seqnum + 
} + sequenceDeltaPerExporter[key] = seqDeltaValue + } + return sequenceDeltaPerExporter +} + func (agg *FlowAggregator) rollupTrackersRefresh() { log.Debugf("Rollup tracker refresh: use new store as current store") agg.flowAcc.portRollup.UseNewStoreAsCurrentStore() diff --git a/pkg/netflow/flowaggregator/aggregator_test.go b/pkg/netflow/flowaggregator/aggregator_test.go index 69469db31afa7..8c037849a36bf 100644 --- a/pkg/netflow/flowaggregator/aggregator_test.go +++ b/pkg/netflow/flowaggregator/aggregator_test.go @@ -41,7 +41,7 @@ import ( func TestAggregator(t *testing.T) { stoppedMu := sync.RWMutex{} // Mutex needed to avoid race condition in test - + flushTime, _ := time.Parse(time.RFC3339, "2019-02-18T16:00:06Z") sender := mocksender.NewMockSender("") sender.On("Gauge", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return() sender.On("Count", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return() @@ -84,6 +84,7 @@ func TestAggregator(t *testing.T) { // language=json event := []byte(` { + "flush_timestamp": 1550505606000, "type": "netflow9", "sampling_rate": 0, "direction": "ingress", @@ -159,9 +160,8 @@ func TestAggregator(t *testing.T) { aggregator := NewFlowAggregator(sender, epForwarder, &conf, "my-hostname") aggregator.flushFlowsToSendInterval = 1 * time.Second - aggregator.timeNowFunction = func() time.Time { - t, _ := time.Parse(time.RFC3339, "2019-02-18T16:00:06Z") - return t + aggregator.TimeNowFunction = func() time.Time { + return flushTime } inChan := aggregator.GetFlowInChan() @@ -209,7 +209,8 @@ stopLoop: } func TestAggregator_withMockPayload(t *testing.T) { - port := uint16(52056) + port := testutil.GetFreePort() + flushTime, _ := time.Parse(time.RFC3339, "2019-02-18T16:00:06Z") sender := mocksender.NewMockSender("") sender.On("Gauge", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return() @@ -258,9 +259,8 @@ func TestAggregator_withMockPayload(t *testing.T) { aggregator := NewFlowAggregator(sender, 
epForwarder, &conf, "my-hostname") aggregator.flushFlowsToSendInterval = 1 * time.Second - aggregator.timeNowFunction = func() time.Time { - t, _ := time.Parse(time.RFC3339, "2019-02-18T16:00:06Z") - return t + aggregator.TimeNowFunction = func() time.Time { + return flushTime } stoppedFlushLoop := make(chan struct{}) @@ -295,11 +295,13 @@ func TestAggregator_withMockPayload(t *testing.T) { sender.AssertMetric(t, "Gauge", "datadog.netflow.aggregator.port_rollup.new_store_size", 4, "", nil) sender.AssertMetric(t, "Gauge", "datadog.netflow.aggregator.input_buffer.capacity", 20, "", nil) sender.AssertMetric(t, "Gauge", "datadog.netflow.aggregator.input_buffer.length", 0, "", nil) + sender.AssertMetric(t, "Count", "datadog.netflow.aggregator.sequence.delta", 0, "", []string{"exporter_ip:127.0.0.1", "device_namespace:default", "flow_type:netflow5"}) + sender.AssertMetric(t, "Gauge", "datadog.netflow.aggregator.sequence.last", 94, "", []string{"exporter_ip:127.0.0.1", "device_namespace:default", "flow_type:netflow5"}) sender.AssertMetric(t, "MonotonicCount", "datadog.netflow.decoder.messages", 1, "", []string{"collector_type:netflow5", "worker:0"}) - sender.AssertMetric(t, "MonotonicCount", "datadog.netflow.processor.flows", 1, "", []string{"exporter_ip:127.0.0.1", "version:5", "flow_protocol:netflow"}) + sender.AssertMetric(t, "MonotonicCount", "datadog.netflow.processor.processed", 1, "", []string{"exporter_ip:127.0.0.1", "version:5", "flow_protocol:netflow"}) sender.AssertMetric(t, "MonotonicCount", "datadog.netflow.processor.flowsets", 2, "", []string{"exporter_ip:127.0.0.1", "type:data_flow_set", "version:5", "flow_protocol:netflow"}) - sender.AssertMetric(t, "MonotonicCount", "datadog.netflow.traffic.bytes", 120, "", []string{"listener_port:52056", "exporter_ip:127.0.0.1", "collector_type:netflow5"}) - sender.AssertMetric(t, "MonotonicCount", "datadog.netflow.traffic.packets", 1, "", []string{"listener_port:52056", "exporter_ip:127.0.0.1", 
"collector_type:netflow5"}) + sender.AssertMetric(t, "MonotonicCount", "datadog.netflow.traffic.bytes", 120, "", []string{fmt.Sprintf("listener_port:%d", port), "exporter_ip:127.0.0.1", "collector_type:netflow5"}) + sender.AssertMetric(t, "MonotonicCount", "datadog.netflow.traffic.packets", 1, "", []string{fmt.Sprintf("listener_port:%d", port), "exporter_ip:127.0.0.1", "collector_type:netflow5"}) flowState.Shutdown() aggregator.Stop() @@ -853,3 +855,295 @@ func TestFlowAggregator_sendExporterMetadata_singleExporterIpWithMultipleFlowTyp // call sendExporterMetadata does not trigger any call to epForwarder.SendEventPlatformEventBlocking(...) aggregator.sendExporterMetadata(flows, now) } + +func TestFlowAggregator_getSequenceDelta(t *testing.T) { + type round struct { + flowsToFlush []*common.Flow + expectedSequenceDelta map[SequenceDeltaKey]SequenceDeltaValue + } + tests := []struct { + name string + rounds []round + }{ + { + name: "multiple namespaces", + rounds: []round{ + { + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 10, + FlowType: common.TypeNetFlow5, + }, + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 20, + FlowType: common.TypeNetFlow5, + }, + { + Namespace: "ns2", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 30, + FlowType: common.TypeNetFlow5, + }, + }, + expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + {FlowType: common.TypeNetFlow5, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 20, Delta: 0}, + {FlowType: common.TypeNetFlow5, Namespace: "ns2", ExporterIP: "127.0.0.11"}: {LastSequence: 30, Delta: 0}, + }, + }, + { + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 30, + FlowType: common.TypeNetFlow5, + }, + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 40, + FlowType: common.TypeNetFlow5, + }, + { + Namespace: "ns2", + ExporterAddr: 
[]byte{127, 0, 0, 11}, + SequenceNum: 60, + FlowType: common.TypeNetFlow5, + }, + }, + expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + {FlowType: common.TypeNetFlow5, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 40, Delta: 20}, + {FlowType: common.TypeNetFlow5, Namespace: "ns2", ExporterIP: "127.0.0.11"}: {LastSequence: 60, Delta: 30}, + }, + }, + }, + }, + { + name: "sequence reset", + rounds: []round{ + { + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 10000, + FlowType: common.TypeNetFlow5, + }, + }, + expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + {FlowType: common.TypeNetFlow5, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 10000, Delta: 0}, + }, + }, + { + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 100, + FlowType: common.TypeNetFlow5, + }, + }, + expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + {FlowType: common.TypeNetFlow5, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 100, Delta: 100, Reset: true}, + }, + }, + }, + }, + { + name: "negative delta and sequence reset for netflow5", + rounds: []round{ + { + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 10000, + FlowType: common.TypeNetFlow5, + }, + }, + expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + {FlowType: common.TypeNetFlow5, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 10000, Delta: 0}, + }, + }, + { // trigger sequence reset since delta -1100 is less than -1000 + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 8900, + FlowType: common.TypeNetFlow5, + }, + }, + expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + {FlowType: common.TypeNetFlow5, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 8900, Delta: 
8900, Reset: true}, + }, + }, + { // negative delta without sequence reset + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 8500, + FlowType: common.TypeNetFlow5, + }, + }, + expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + {FlowType: common.TypeNetFlow5, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 8500, Delta: 0}, + }, + }, + }, + }, + { + name: "negative delta and sequence reset for sflow5", + rounds: []round{ + { + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 10000, + FlowType: common.TypeSFlow5, + }, + }, + expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + {FlowType: common.TypeSFlow5, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 10000, Delta: 0}, + }, + }, + { // trigger sequence reset since delta -1100 is less than -1000 + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 8900, + FlowType: common.TypeSFlow5, + }, + }, + expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + {FlowType: common.TypeSFlow5, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 8900, Delta: 8900, Reset: true}, + }, + }, + { // negative delta without sequence reset + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 8500, + FlowType: common.TypeSFlow5, + }, + }, + expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + {FlowType: common.TypeSFlow5, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 8500, Delta: 0}, + }, + }, + }, + }, + { + name: "negative delta and sequence reset for netflow9", + rounds: []round{ + { + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 10000, + FlowType: common.TypeNetFlow9, + }, + }, + expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + 
{FlowType: common.TypeNetFlow9, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 10000, Delta: 0}, + }, + }, + { // trigger sequence reset since delta -200 is less than -100 + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 9800, + FlowType: common.TypeNetFlow9, + }, + }, + expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + {FlowType: common.TypeNetFlow9, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 9800, Delta: 9800, Reset: true}, + }, + }, + { // negative delta without sequence reset + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 9750, + FlowType: common.TypeNetFlow9, + }, + }, + expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + {FlowType: common.TypeNetFlow9, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 9750, Delta: 0}, + }, + }, + }, + }, + { + name: "negative delta and sequence reset for IPFIX", + rounds: []round{ + { + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 10000, + FlowType: common.TypeIPFIX, + }, + }, + expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + {FlowType: common.TypeIPFIX, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 10000, Delta: 0}, + }, + }, + { // trigger sequence reset since delta -200 is less than -100 + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 9800, + FlowType: common.TypeIPFIX, + }, + }, + expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + {FlowType: common.TypeIPFIX, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 9800, Delta: 9800, Reset: true}, + }, + }, + { // negative delta without sequence reset + flowsToFlush: []*common.Flow{ + { + Namespace: "ns1", + ExporterAddr: []byte{127, 0, 0, 11}, + SequenceNum: 9750, + FlowType: common.TypeIPFIX, + }, + }, + 
expectedSequenceDelta: map[SequenceDeltaKey]SequenceDeltaValue{ + {FlowType: common.TypeIPFIX, Namespace: "ns1", ExporterIP: "127.0.0.11"}: {LastSequence: 9750, Delta: 0}, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sender := mocksender.NewMockSender("") + conf := config.NetflowConfig{ + StopTimeout: 10, + AggregatorBufferSize: 20, + AggregatorFlushInterval: 1, + AggregatorPortRollupThreshold: 10, + AggregatorRollupTrackerRefreshInterval: 3600, + } + agg := NewFlowAggregator(sender, nil, &conf, "my-hostname") + for roundNum, testRound := range tt.rounds { + assert.Equal(t, testRound.expectedSequenceDelta, agg.getSequenceDelta(testRound.flowsToFlush), fmt.Sprintf("Test Round %d", roundNum)) + } + }) + } +} diff --git a/pkg/netflow/flowaggregator/buildpayload.go b/pkg/netflow/flowaggregator/buildpayload.go index f195975bddfea..275106ddf37d4 100644 --- a/pkg/netflow/flowaggregator/buildpayload.go +++ b/pkg/netflow/flowaggregator/buildpayload.go @@ -6,18 +6,21 @@ package flowaggregator import ( + "time" + "github.com/DataDog/datadog-agent/pkg/netflow/common" "github.com/DataDog/datadog-agent/pkg/netflow/enrichment" "github.com/DataDog/datadog-agent/pkg/netflow/payload" "github.com/DataDog/datadog-agent/pkg/netflow/portrollup" ) -func buildPayload(aggFlow *common.Flow, hostname string) payload.FlowPayload { +func buildPayload(aggFlow *common.Flow, hostname string, flushTime time.Time) payload.FlowPayload { return payload.FlowPayload{ // TODO: Implement Tos - FlowType: string(aggFlow.FlowType), - SamplingRate: aggFlow.SamplingRate, - Direction: enrichment.RemapDirection(aggFlow.Direction), + FlushTimestamp: flushTime.UnixMilli(), + FlowType: string(aggFlow.FlowType), + SamplingRate: aggFlow.SamplingRate, + Direction: enrichment.RemapDirection(aggFlow.Direction), Device: payload.Device{ Namespace: aggFlow.Namespace, }, diff --git a/pkg/netflow/flowaggregator/buildpayload_test.go 
b/pkg/netflow/flowaggregator/buildpayload_test.go index 5b4b20604a8e2..cf048a06b5666 100644 --- a/pkg/netflow/flowaggregator/buildpayload_test.go +++ b/pkg/netflow/flowaggregator/buildpayload_test.go @@ -7,6 +7,7 @@ package flowaggregator import ( "testing" + "time" "github.com/stretchr/testify/assert" @@ -15,6 +16,7 @@ import ( ) func Test_buildPayload(t *testing.T) { + curTime := time.Now() tests := []struct { name string flow common.Flow @@ -49,15 +51,16 @@ func Test_buildPayload(t *testing.T) { TCPFlags: uint32(19), // 19 = SYN,ACK,FIN }, expectedPayload: payload.FlowPayload{ - FlowType: "netflow9", - SamplingRate: 10, - Direction: "egress", - Start: 1234568, - End: 1234569, - Bytes: 10, - Packets: 2, - EtherType: "IPv4", - IPProtocol: "TCP", + FlushTimestamp: curTime.UnixMilli(), + FlowType: "netflow9", + SamplingRate: 10, + Direction: "egress", + Start: 1234568, + End: 1234569, + Bytes: 10, + Packets: 2, + EtherType: "IPv4", + IPProtocol: "TCP", Device: payload.Device{ Namespace: "my-namespace", }, @@ -113,15 +116,16 @@ func Test_buildPayload(t *testing.T) { TCPFlags: uint32(19), // 19 = SYN,ACK,FIN }, expectedPayload: payload.FlowPayload{ - FlowType: "netflow9", - SamplingRate: 10, - Direction: "egress", - Start: 1234568, - End: 1234569, - Bytes: 10, - Packets: 2, - EtherType: "IPv4", - IPProtocol: "TCP", + FlushTimestamp: curTime.UnixMilli(), + FlowType: "netflow9", + SamplingRate: 10, + Direction: "egress", + Start: 1234568, + End: 1234569, + Bytes: 10, + Packets: 2, + EtherType: "IPv4", + IPProtocol: "TCP", Device: payload.Device{ Namespace: "my-namespace", }, @@ -177,15 +181,16 @@ func Test_buildPayload(t *testing.T) { TCPFlags: uint32(19), // 19 = SYN,ACK,FIN }, expectedPayload: payload.FlowPayload{ - FlowType: "netflow9", - SamplingRate: 10, - Direction: "egress", - Start: 1234568, - End: 1234569, - Bytes: 10, - Packets: 2, - EtherType: "IPv4", - IPProtocol: "TCP", + FlushTimestamp: curTime.UnixMilli(), + FlowType: "netflow9", + SamplingRate: 10, + 
Direction: "egress", + Start: 1234568, + End: 1234569, + Bytes: 10, + Packets: 2, + EtherType: "IPv4", + IPProtocol: "TCP", Device: payload.Device{ Namespace: "my-namespace", }, @@ -215,7 +220,7 @@ func Test_buildPayload(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - flowPayload := buildPayload(&tt.flow, "my-hostname") + flowPayload := buildPayload(&tt.flow, "my-hostname", curTime) assert.Equal(t, tt.expectedPayload, flowPayload) }) } diff --git a/pkg/netflow/flowaggregator/flowaccumulator.go b/pkg/netflow/flowaggregator/flowaccumulator.go index 66fa20d921f67..3803f04e102ca 100644 --- a/pkg/netflow/flowaggregator/flowaccumulator.go +++ b/pkg/netflow/flowaggregator/flowaccumulator.go @@ -134,6 +134,7 @@ func (f *flowAccumulator) add(flowToAdd *common.Flow) { aggFlow.flow.Packets += flowToAdd.Packets aggFlow.flow.StartTimestamp = common.MinUint64(aggFlow.flow.StartTimestamp, flowToAdd.StartTimestamp) aggFlow.flow.EndTimestamp = common.MaxUint64(aggFlow.flow.EndTimestamp, flowToAdd.EndTimestamp) + aggFlow.flow.SequenceNum = common.MaxUint32(aggFlow.flow.SequenceNum, flowToAdd.SequenceNum) aggFlow.flow.TCPFlags |= flowToAdd.TCPFlags } f.flows[aggHash] = aggFlow diff --git a/pkg/netflow/goflowlib/convert.go b/pkg/netflow/goflowlib/convert.go index 0a798bd7d7129..d0a797c4a096b 100644 --- a/pkg/netflow/goflowlib/convert.go +++ b/pkg/netflow/goflowlib/convert.go @@ -16,6 +16,7 @@ func ConvertFlow(srcFlow *flowpb.FlowMessage, namespace string) *common.Flow { return &common.Flow{ Namespace: namespace, FlowType: convertFlowType(srcFlow.Type), + SequenceNum: srcFlow.SequenceNum, SamplingRate: srcFlow.SamplingRate, Direction: srcFlow.FlowDirection, ExporterAddr: srcFlow.SamplerAddress, // Sampler is renamed to Exporter since it's a more commonly used diff --git a/pkg/netflow/goflowlib/convert_test.go b/pkg/netflow/goflowlib/convert_test.go index 6af9086dc44b6..f122d327ff88c 100644 --- a/pkg/netflow/goflowlib/convert_test.go +++ 
b/pkg/netflow/goflowlib/convert_test.go @@ -51,6 +51,7 @@ func TestConvertFlow(t *testing.T) { srcFlow := flowpb.FlowMessage{ Type: flowpb.FlowMessage_NETFLOW_V9, TimeReceived: 1234567, + SequenceNum: 20, SamplingRate: 10, FlowDirection: 1, SamplerAddress: []byte{127, 0, 0, 1}, @@ -76,6 +77,7 @@ func TestConvertFlow(t *testing.T) { expectedFlow := common.Flow{ Namespace: "my-ns", FlowType: common.TypeNetFlow9, + SequenceNum: 20, SamplingRate: 10, Direction: 1, ExporterAddr: []byte{127, 0, 0, 1}, diff --git a/pkg/netflow/goflowlib/metric.go b/pkg/netflow/goflowlib/metric.go index df919dddbdeb6..2e4a66a528713 100644 --- a/pkg/netflow/goflowlib/metric.go +++ b/pkg/netflow/goflowlib/metric.go @@ -66,7 +66,7 @@ var metricNameMapping = map[string]mappedMetric{ }, }, "flow_process_nf_count": { - name: "processor.flows", + name: "processor.processed", allowedTagKeys: []string{"router", "version"}, keyRemapper: map[string]string{ "router": "exporter_ip", @@ -109,7 +109,7 @@ var metricNameMapping = map[string]mappedMetric{ }, }, "flow_process_sf_count": { - name: "processor.flows", + name: "processor.processed", allowedTagKeys: []string{"router", "version"}, keyRemapper: map[string]string{ "router": "exporter_ip", diff --git a/pkg/netflow/goflowlib/metric_test.go b/pkg/netflow/goflowlib/metric_test.go index 17f7e297958b0..48ec43bf304dd 100644 --- a/pkg/netflow/goflowlib/metric_test.go +++ b/pkg/netflow/goflowlib/metric_test.go @@ -77,7 +77,7 @@ func TestConvertMetric(t *testing.T) { }, }, expectedMetricType: metrics.MonotonicCountType, - expectedName: "processor.flows", + expectedName: "processor.processed", expectedValue: 10.0, expectedTags: []string{"exporter_ip:1.2.3.4", "version:5", "flow_protocol:netflow"}, expectedErr: "", @@ -96,7 +96,7 @@ func TestConvertMetric(t *testing.T) { }, }, expectedMetricType: metrics.MonotonicCountType, - expectedName: "processor.flows", + expectedName: "processor.processed", expectedValue: 10.0, expectedTags: 
[]string{"exporter_ip:1.2.3.4", "version:5", "flow_protocol:netflow"}, expectedErr: "", @@ -115,7 +115,7 @@ func TestConvertMetric(t *testing.T) { }, }, expectedMetricType: metrics.GaugeType, - expectedName: "processor.flows", + expectedName: "processor.processed", expectedValue: 10.0, expectedTags: []string{"exporter_ip:1.2.3.4", "version:5", "flow_protocol:netflow"}, expectedErr: "", @@ -348,7 +348,7 @@ func TestConvertMetric(t *testing.T) { }, }, expectedMetricType: metrics.MonotonicCountType, - expectedName: "processor.flows", + expectedName: "processor.processed", expectedValue: 10.0, expectedTags: []string{"exporter_ip:1.2.3.4", "version:5", "flow_protocol:netflow"}, expectedErr: "", @@ -427,7 +427,7 @@ func TestConvertMetric(t *testing.T) { }, }, expectedMetricType: metrics.MonotonicCountType, - expectedName: "processor.flows", + expectedName: "processor.processed", expectedValue: 10.0, expectedTags: []string{"exporter_ip:1.2.3.4", "version:5", "flow_protocol:sflow"}, expectedErr: "", diff --git a/pkg/netflow/payload/payload.go b/pkg/netflow/payload/payload.go index 7de1d647790db..d901092c254ed 100644 --- a/pkg/netflow/payload/payload.go +++ b/pkg/netflow/payload/payload.go @@ -40,22 +40,23 @@ type ObservationPoint struct { // FlowPayload contains network devices flows type FlowPayload struct { - FlowType string `json:"type"` - SamplingRate uint64 `json:"sampling_rate"` - Direction string `json:"direction"` - Start uint64 `json:"start"` // in seconds - End uint64 `json:"end"` // in seconds - Bytes uint64 `json:"bytes"` - Packets uint64 `json:"packets"` - EtherType string `json:"ether_type,omitempty"` - IPProtocol string `json:"ip_protocol"` - Device Device `json:"device"` - Exporter Exporter `json:"exporter"` - Source Endpoint `json:"source"` - Destination Endpoint `json:"destination"` - Ingress ObservationPoint `json:"ingress"` - Egress ObservationPoint `json:"egress"` - Host string `json:"host"` - TCPFlags []string `json:"tcp_flags,omitempty"` - NextHop 
NextHop `json:"next_hop,omitempty"` + FlushTimestamp int64 `json:"flush_timestamp"` + FlowType string `json:"type"` + SamplingRate uint64 `json:"sampling_rate"` + Direction string `json:"direction"` + Start uint64 `json:"start"` // in seconds + End uint64 `json:"end"` // in seconds + Bytes uint64 `json:"bytes"` + Packets uint64 `json:"packets"` + EtherType string `json:"ether_type,omitempty"` + IPProtocol string `json:"ip_protocol"` + Device Device `json:"device"` + Exporter Exporter `json:"exporter"` + Source Endpoint `json:"source"` + Destination Endpoint `json:"destination"` + Ingress ObservationPoint `json:"ingress"` + Egress ObservationPoint `json:"egress"` + Host string `json:"host"` + TCPFlags []string `json:"tcp_flags,omitempty"` + NextHop NextHop `json:"next_hop,omitempty"` } diff --git a/pkg/netflow/server_integration_test.go b/pkg/netflow/server_integration_test.go index e7eed458b1222..e1f9937619e4e 100644 --- a/pkg/netflow/server_integration_test.go +++ b/pkg/netflow/server_integration_test.go @@ -24,7 +24,8 @@ import ( func TestNetFlow_IntegrationTest_NetFlow5(t *testing.T) { // Setup NetFlow feature config - port := uint16(52055) + port := testutil.GetFreePort() + flushTime, _ := time.Parse(time.RFC3339, "2019-02-18T16:00:06Z") config.Datadog.SetConfigType("yaml") err := config.Datadog.MergeConfigOverride(strings.NewReader(fmt.Sprintf(` network_devices: @@ -52,6 +53,10 @@ network_devices: require.NoError(t, err, "cannot start Netflow Server") assert.NotNil(t, server) + server.flowAgg.TimeNowFunction = func() time.Time { + return flushTime + } + // Send netflowV5Data twice to test aggregator // Flows will have 2x bytes/packets after aggregation time.Sleep(100 * time.Millisecond) // wait to make sure goflow listener is started before sending @@ -72,7 +77,7 @@ network_devices: func TestNetFlow_IntegrationTest_NetFlow9(t *testing.T) { // Setup NetFlow feature config - port := uint16(52056) + port := testutil.GetFreePort() 
config.Datadog.SetConfigType("yaml") err := config.Datadog.MergeConfigOverride(strings.NewReader(fmt.Sprintf(` network_devices: @@ -118,7 +123,7 @@ network_devices: func TestNetFlow_IntegrationTest_SFlow5(t *testing.T) { // Setup NetFlow feature config - port := uint16(52057) + port := testutil.GetFreePort() config.Datadog.SetConfigType("yaml") err := config.Datadog.MergeConfigOverride(strings.NewReader(fmt.Sprintf(` network_devices: diff --git a/pkg/netflow/server_test.go b/pkg/netflow/server_test.go index 07373c79a47bd..864578550cbee 100644 --- a/pkg/netflow/server_test.go +++ b/pkg/netflow/server_test.go @@ -16,13 +16,16 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/config" + + "github.com/DataDog/datadog-agent/pkg/netflow/testutil" ) func TestStartServerAndStopServer(t *testing.T) { demux := aggregator.InitTestAgentDemultiplexerWithFlushInterval(10 * time.Millisecond) defer demux.Stop(false) - port := uint16(52056) + port := testutil.GetFreePort() + config.Datadog.SetConfigType("yaml") err := config.Datadog.MergeConfigOverride(strings.NewReader(fmt.Sprintf(` network_devices: @@ -59,7 +62,8 @@ func TestIsEnabled(t *testing.T) { func TestServer_Stop(t *testing.T) { // Setup NetFlow config - port := uint16(12056) + port := testutil.GetFreePort() + config.Datadog.SetConfigType("yaml") err := config.Datadog.MergeConfigOverride(strings.NewReader(fmt.Sprintf(` network_devices: diff --git a/pkg/netflow/testutil/freeport.go b/pkg/netflow/testutil/freeport.go new file mode 100644 index 0000000000000..75f48c4970766 --- /dev/null +++ b/pkg/netflow/testutil/freeport.go @@ -0,0 +1,43 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023-present Datadog, Inc. 
+ +//go:build test + +package testutil + +import ( + "net" + "strconv" +) + +func GetFreePort() uint16 { + var port uint16 + for i := 0; i < 5; i++ { + conn, err := net.ListenPacket("udp", ":0") + if err != nil { + continue + } + conn.Close() + port, err = parsePort(conn.LocalAddr().String()) + if err != nil { + continue + } + return port + } + panic("unable to find free port for starting the trap listener") +} + +func parsePort(addr string) (uint16, error) { + _, portString, err := net.SplitHostPort(addr) + if err != nil { + return 0, err + } + + port, err := strconv.ParseUint(portString, 10, 16) + if err != nil { + return 0, err + } + return uint16(port), nil +} diff --git a/pkg/netflow/testutil/testutil.go b/pkg/netflow/testutil/testutil.go index a22c1345a2112..d995f0dac323d 100644 --- a/pkg/netflow/testutil/testutil.go +++ b/pkg/netflow/testutil/testutil.go @@ -48,6 +48,7 @@ func ExpectNetflow5Payloads(t *testing.T, mockEpForwrader *epforwarder.MockEvent events := [][]byte{ []byte(` { + "flush_timestamp": 1550505606000, "type": "netflow5", "sampling_rate": 0, "direction": "ingress", @@ -93,6 +94,7 @@ func ExpectNetflow5Payloads(t *testing.T, mockEpForwrader *epforwarder.MockEvent `), []byte(` { + "flush_timestamp": 1550505606000, "type": "netflow5", "sampling_rate": 0, "direction": "ingress", diff --git a/pkg/network/config/config.go b/pkg/network/config/config.go index 6d094e3883bb3..b40e5a4b32091 100644 --- a/pkg/network/config/config.go +++ b/pkg/network/config/config.go @@ -6,13 +6,12 @@ package config import ( - "runtime" "strings" "time" + sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/ebpf" - "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -26,11 +25,6 @@ const ( defaultUDPTimeoutSeconds = 30 defaultUDPStreamTimeoutSeconds = 120 - - defaultOffsetThreshold = 400 - maxOffsetThreshold = 
3000 - - defaultMaxProcessesTracked = 1024 ) // Config stores all flags used by the network eBPF tracer @@ -146,11 +140,11 @@ type Config struct { TCPClosedTimeout time.Duration // MaxTrackedConnections specifies the maximum number of connections we can track. This determines the size of the eBPF Maps - MaxTrackedConnections uint + MaxTrackedConnections uint32 // MaxClosedConnectionsBuffered represents the maximum number of closed connections we'll buffer in memory. These closed connections // get flushed on every client request (default 30s check interval) - MaxClosedConnectionsBuffered int + MaxClosedConnectionsBuffered uint32 // ClosedConnectionFlushThreshold represents the number of closed connections stored before signalling // the agent to flush the connections. This value only valid on Windows @@ -262,6 +256,9 @@ func join(pieces ...string) string { // New creates a config for the network tracer func New() *Config { cfg := ddconfig.SystemProbe + if !sysconfig.IsAdjusted(cfg) { + sysconfig.Adjust(cfg) + } c := &Config{ Config: *ebpf.NewConfig(), @@ -270,13 +267,13 @@ func New() *Config { ServiceMonitoringEnabled: cfg.GetBool(join(smNS, "enabled")), DataStreamsEnabled: cfg.GetBool(join(dsNS, "enabled")), - CollectTCPv4Conns: true, - CollectTCPv6Conns: true, + CollectTCPv4Conns: cfg.GetBool(join(netNS, "collect_tcp_v4")), + CollectTCPv6Conns: cfg.GetBool(join(netNS, "collect_tcp_v6")), TCPConnTimeout: 2 * time.Minute, TCPClosedTimeout: 1 * time.Second, - CollectUDPv4Conns: true, - CollectUDPv6Conns: true, + CollectUDPv4Conns: cfg.GetBool(join(netNS, "collect_udp_v4")), + CollectUDPv6Conns: cfg.GetBool(join(netNS, "collect_udp_v6")), UDPConnTimeout: defaultUDPTimeoutSeconds * time.Second, UDPStreamTimeout: defaultUDPStreamTimeoutSeconds * time.Second, @@ -284,8 +281,8 @@ func New() *Config { ExcludedSourceConnections: cfg.GetStringMapStringSlice(join(spNS, "source_excludes")), ExcludedDestinationConnections: cfg.GetStringMapStringSlice(join(spNS, 
"dest_excludes")), - MaxTrackedConnections: uint(cfg.GetInt(join(spNS, "max_tracked_connections"))), - MaxClosedConnectionsBuffered: cfg.GetInt(join(spNS, "max_closed_connections_buffered")), + MaxTrackedConnections: uint32(cfg.GetInt64(join(spNS, "max_tracked_connections"))), + MaxClosedConnectionsBuffered: uint32(cfg.GetInt64(join(spNS, "max_closed_connections_buffered"))), ClosedConnectionFlushThreshold: cfg.GetInt(join(spNS, "closed_connection_flush_threshold")), ClosedChannelSize: cfg.GetInt(join(spNS, "closed_channel_size")), MaxConnectionsStateBuffered: cfg.GetInt(join(spNS, "max_connection_state_buffered")), @@ -301,7 +298,7 @@ func New() *Config { ProtocolClassificationEnabled: cfg.GetBool(join(netNS, "enable_protocol_classification")), - EnableHTTPMonitoring: cfg.GetBool(join(netNS, "enable_http_monitoring")), + EnableHTTPMonitoring: cfg.GetBool(join(smNS, "enable_http_monitoring")), EnableHTTP2Monitoring: cfg.GetBool(join(smNS, "enable_http2_monitoring")), EnableHTTPSMonitoring: cfg.GetBool(join(netNS, "enable_https_monitoring")), MaxHTTPStatsBuffered: cfg.GetInt(join(netNS, "max_http_stats_buffered")), @@ -345,46 +342,6 @@ func New() *Config { EnableHTTPStatsByStatusCode: cfg.GetBool(join(smNS, "enable_http_stats_by_status_code")), } - if cfg.GetBool(join(spNS, "disable_tcp")) { - c.CollectTCPv4Conns = false - c.CollectTCPv6Conns = false - } - if cfg.GetBool(join(spNS, "disable_udp")) { - c.CollectUDPv4Conns = false - c.CollectUDPv6Conns = false - } - if cfg.GetBool(join(spNS, "disable_ipv6")) { - c.CollectTCPv6Conns = false - c.CollectUDPv6Conns = false - } - - if runtime.GOOS == "windows" { - if cfg.IsSet(join(spNS, "closed_connection_flush_threshold")) && c.ClosedConnectionFlushThreshold < 1024 { - log.Warnf("Closed connection notification threshold set to invalid value %d. 
Resetting to default.", c.ClosedConnectionFlushThreshold) - - // 0 will allow the underlying driver interface mechanism to choose appropriately - c.ClosedConnectionFlushThreshold = 0 - } - } - if !cfg.IsSet(join(spNS, "max_closed_connections_buffered")) { - // make sure max_closed_connections_buffered is equal to - // max_tracked_connections, since the former is not set. - // this helps with lowering or eliminating dropped - // closed connections in environments with mostly short-lived - // connections - c.MaxClosedConnectionsBuffered = int(c.MaxTrackedConnections) - } - if c.HTTPNotificationThreshold >= c.MaxTrackedHTTPConnections { - log.Warnf("Notification threshold set higher than tracked connections. %d > %d ; resetting to %d", - c.HTTPNotificationThreshold, c.MaxTrackedHTTPConnections, c.MaxTrackedHTTPConnections/2) - c.HTTPNotificationThreshold = c.MaxTrackedHTTPConnections / 2 - } - - maxHTTPFrag := uint64(160) - if c.HTTPMaxRequestFragment > int64(maxHTTPFrag) { // dbtodo where is the actual max defined? - log.Warnf("Max HTTP fragment too large (%d) resetting to (%d) ", c.HTTPMaxRequestFragment, maxHTTPFrag) - c.HTTPMaxRequestFragment = int64(maxHTTPFrag) - } httpRRKey := join(netNS, "http_replace_rules") rr, err := parseReplaceRules(cfg, httpRRKey) if err != nil { @@ -393,69 +350,26 @@ func New() *Config { c.HTTPReplaceRules = rr } - if c.OffsetGuessThreshold > maxOffsetThreshold { - log.Warn("offset_guess_threshold exceeds maximum of 3000. 
Setting it to the default of 400") - c.OffsetGuessThreshold = defaultOffsetThreshold - } - if !c.CollectTCPv4Conns { - log.Info("network tracer TCPv4 tracing disabled by configuration") + log.Info("network tracer TCPv4 tracing disabled") } if !c.CollectUDPv4Conns { - log.Info("network tracer UDPv4 tracing disabled by configuration") + log.Info("network tracer UDPv4 tracing disabled") } - - if !kernel.IsIPv6Enabled() { - c.CollectTCPv6Conns = false - c.CollectUDPv6Conns = false - log.Info("network tracer IPv6 tracing disabled by system") - } else { - if !c.CollectTCPv6Conns { - log.Info("network tracer TCPv6 tracing disabled by configuration") - } - if !c.CollectUDPv6Conns { - log.Info("network tracer UDPv6 tracing disabled by configuration") - } + if !c.CollectTCPv6Conns { + log.Info("network tracer TCPv6 tracing disabled") + } + if !c.CollectUDPv6Conns { + log.Info("network tracer UDPv6 tracing disabled") } - if !c.DNSInspection { log.Info("network tracer DNS inspection disabled by configuration") } - c.ServiceMonitoringEnabled = c.ServiceMonitoringEnabled || c.DataStreamsEnabled - - if c.ServiceMonitoringEnabled { - cfg.Set(join(netNS, "enable_http_monitoring"), true) - c.EnableHTTPMonitoring = true - if !cfg.IsSet(join(netNS, "enable_https_monitoring")) { - cfg.Set(join(netNS, "enable_https_monitoring"), true) - c.EnableHTTPSMonitoring = true - } - - if !cfg.IsSet(join(spNS, "enable_runtime_compiler")) { - cfg.Set(join(spNS, "enable_runtime_compiler"), true) - c.EnableRuntimeCompiler = true - } - - if !cfg.IsSet(join(spNS, "enable_kernel_header_download")) { - cfg.Set(join(spNS, "enable_kernel_header_download"), true) - c.EnableKernelHeaderDownload = true - } - } - c.EnableKafkaMonitoring = c.DataStreamsEnabled if c.EnableProcessEventMonitoring { log.Info("network process event monitoring enabled") - - if c.MaxProcessesTracked <= 0 { - c.MaxProcessesTracked = defaultMaxProcessesTracked - } } - - if !c.EnableRootNetNs { - c.EnableConntrackAllNamespaces = false - 
} - return c } diff --git a/pkg/network/config/config_linux_test.go b/pkg/network/config/config_linux_test.go index f524e851ff3d3..c6ba2ff4b8d9e 100644 --- a/pkg/network/config/config_linux_test.go +++ b/pkg/network/config/config_linux_test.go @@ -11,13 +11,11 @@ import ( "github.com/stretchr/testify/require" "github.com/vishvananda/netns" - - "github.com/DataDog/datadog-agent/pkg/config" ) func TestDisableRootNetNamespace(t *testing.T) { newConfig(t) - config.SystemProbe.Set("network_config.enable_root_netns", false) + t.Setenv("DD_NETWORK_CONFIG_ENABLE_ROOT_NETNS", "false") cfg := New() require.False(t, cfg.EnableConntrackAllNamespaces) diff --git a/pkg/network/config/config_test.go b/pkg/network/config/config_test.go index afc10c0ae473c..b0f420d4c2d51 100644 --- a/pkg/network/config/config_test.go +++ b/pkg/network/config/config_test.go @@ -3,6 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. +//go:build linux || windows + package config import ( @@ -114,6 +116,26 @@ func TestEnableHTTPStatsByStatusCode(t *testing.T) { } func TestEnableHTTPMonitoring(t *testing.T) { + t.Run("via deprecated YAML", func(t *testing.T) { + newConfig(t) + _, err := sysconfig.New("./testdata/TestDDAgentConfigYamlAndSystemProbeConfig-DeprecatedEnableHTTP.yaml") + require.NoError(t, err) + cfg := New() + + assert.True(t, cfg.EnableHTTPMonitoring) + }) + + t.Run("via deprecated ENV variable", func(t *testing.T) { + newConfig(t) + t.Setenv("DD_SYSTEM_PROBE_NETWORK_ENABLE_HTTP_MONITORING", "true") + + _, err := sysconfig.New("") + require.NoError(t, err) + cfg := New() + + assert.True(t, cfg.EnableHTTPMonitoring) + }) + t.Run("via YAML", func(t *testing.T) { newConfig(t) _, err := sysconfig.New("./testdata/TestDDAgentConfigYamlAndSystemProbeConfig-EnableHTTP.yaml") @@ -124,8 +146,32 @@ func TestEnableHTTPMonitoring(t *testing.T) { }) t.Run("via ENV variable", func(t *testing.T) { + newConfig(t) + 
t.Setenv("DD_SERVICE_MONITORING_CONFIG_ENABLE_HTTP_MONITORING", "true") + + _, err := sysconfig.New("") + require.NoError(t, err) + cfg := New() + + assert.True(t, cfg.EnableHTTPMonitoring) + }) + + t.Run("Deprecated is enabled, new is disabled", func(t *testing.T) { newConfig(t) t.Setenv("DD_SYSTEM_PROBE_NETWORK_ENABLE_HTTP_MONITORING", "true") + t.Setenv("DD_SERVICE_MONITORING_CONFIG_ENABLE_HTTP_MONITORING", "false") + + _, err := sysconfig.New("") + require.NoError(t, err) + cfg := New() + + assert.False(t, cfg.EnableHTTPMonitoring) + }) + + t.Run("Deprecated is disabled, new is enabled", func(t *testing.T) { + newConfig(t) + t.Setenv("DD_SYSTEM_PROBE_NETWORK_ENABLE_HTTP_MONITORING", "false") + t.Setenv("DD_SERVICE_MONITORING_CONFIG_ENABLE_HTTP_MONITORING", "true") _, err := sysconfig.New("") require.NoError(t, err) @@ -133,6 +179,25 @@ func TestEnableHTTPMonitoring(t *testing.T) { assert.True(t, cfg.EnableHTTPMonitoring) }) + + t.Run("Both enabled", func(t *testing.T) { + newConfig(t) + t.Setenv("DD_SYSTEM_PROBE_NETWORK_ENABLE_HTTP_MONITORING", "true") + t.Setenv("DD_SERVICE_MONITORING_CONFIG_ENABLE_HTTP_MONITORING", "true") + + _, err := sysconfig.New("") + require.NoError(t, err) + cfg := New() + + assert.True(t, cfg.EnableHTTPMonitoring) + }) + + t.Run("Not enabled", func(t *testing.T) { + newConfig(t) + cfg := New() + + assert.False(t, cfg.EnableHTTPMonitoring) + }) } func TestEnableDataStreams(t *testing.T) { @@ -420,21 +485,75 @@ func TestHTTPReplaceRules(t *testing.T) { }) } +func TestHTTPNotificationThreshold(t *testing.T) { + t.Run("via YAML", func(t *testing.T) { + newConfig(t) + _, err := sysconfig.New("./testdata/TestDDSystemProbeConfig-HTTPNotificationThreshold.yaml") + require.NoError(t, err) + cfg := New() + + require.Equal(t, cfg.HTTPNotificationThreshold, int64(100)) + }) + + t.Run("via ENV variable", func(t *testing.T) { + newConfig(t) + t.Setenv("DD_NETWORK_CONFIG_HTTP_NOTIFICATION_THRESHOLD", "100") + + cfg := New() + + require.Equal(t, 
cfg.HTTPNotificationThreshold, int64(100)) + }) + + t.Run("Not enabled", func(t *testing.T) { + newConfig(t) + cfg := New() + // Default value. + require.Equal(t, cfg.HTTPNotificationThreshold, int64(512)) + }) +} + +// Testing we're not exceeding the limit for http_notification_threshold. +func TestHTTPNotificationThresholdOverLimit(t *testing.T) { + t.Run("via YAML", func(t *testing.T) { + newConfig(t) + _, err := sysconfig.New("./testdata/TestDDSystemProbeConfig-HTTPNotificationThresholdOverLimit.yaml") + require.NoError(t, err) + cfg := New() + + require.Equal(t, cfg.HTTPNotificationThreshold, int64(512)) + }) + + t.Run("via ENV variable", func(t *testing.T) { + newConfig(t) + t.Setenv("DD_NETWORK_CONFIG_HTTP_NOTIFICATION_THRESHOLD", "2000") + + cfg := New() + + require.Equal(t, cfg.HTTPNotificationThreshold, int64(512)) + }) + + t.Run("Not enabled", func(t *testing.T) { + newConfig(t) + cfg := New() + // Default value. + require.Equal(t, cfg.HTTPNotificationThreshold, int64(512)) + }) +} + func TestMaxClosedConnectionsBuffered(t *testing.T) { maxTrackedConnections := New().MaxTrackedConnections t.Run("value set", func(t *testing.T) { newConfig(t) t.Setenv("DD_SYSTEM_PROBE_CONFIG_MAX_CLOSED_CONNECTIONS_BUFFERED", fmt.Sprintf("%d", maxTrackedConnections-1)) - cfg := New() - require.Equal(t, int(maxTrackedConnections-1), cfg.MaxClosedConnectionsBuffered) + require.Equal(t, maxTrackedConnections-1, cfg.MaxClosedConnectionsBuffered) }) t.Run("value not set", func(t *testing.T) { newConfig(t) cfg := New() - require.Equal(t, int(cfg.MaxTrackedConnections), cfg.MaxClosedConnectionsBuffered) + require.Equal(t, cfg.MaxTrackedConnections, cfg.MaxClosedConnectionsBuffered) }) } @@ -442,7 +561,6 @@ func TestMaxHTTPStatsBuffered(t *testing.T) { t.Run("value set through env var", func(t *testing.T) { newConfig(t) t.Setenv("DD_SYSTEM_PROBE_NETWORK_MAX_HTTP_STATS_BUFFERED", "50000") - cfg := New() assert.Equal(t, 50000, cfg.MaxHTTPStatsBuffered) }) @@ -453,7 +571,6 @@ func 
TestMaxHTTPStatsBuffered(t *testing.T) { network_config: max_http_stats_buffered: 30000 `) - assert.Equal(t, 30000, cfg.MaxHTTPStatsBuffered) }) } diff --git a/pkg/network/config/testdata/TestDDAgentConfigYamlAndSystemProbeConfig-DeprecatedEnableHTTP.yaml b/pkg/network/config/testdata/TestDDAgentConfigYamlAndSystemProbeConfig-DeprecatedEnableHTTP.yaml new file mode 100644 index 0000000000000..686f6ab71e6c3 --- /dev/null +++ b/pkg/network/config/testdata/TestDDAgentConfigYamlAndSystemProbeConfig-DeprecatedEnableHTTP.yaml @@ -0,0 +1,2 @@ +network_config: + enable_http_monitoring: true diff --git a/pkg/network/config/testdata/TestDDAgentConfigYamlAndSystemProbeConfig-EnableHTTP.yaml b/pkg/network/config/testdata/TestDDAgentConfigYamlAndSystemProbeConfig-EnableHTTP.yaml index 686f6ab71e6c3..a2c1acebb57d5 100644 --- a/pkg/network/config/testdata/TestDDAgentConfigYamlAndSystemProbeConfig-EnableHTTP.yaml +++ b/pkg/network/config/testdata/TestDDAgentConfigYamlAndSystemProbeConfig-EnableHTTP.yaml @@ -1,2 +1,2 @@ -network_config: +service_monitoring_config: enable_http_monitoring: true diff --git a/pkg/network/config/testdata/TestDDSystemProbeConfig-HTTPNotificationThreshold.yaml b/pkg/network/config/testdata/TestDDSystemProbeConfig-HTTPNotificationThreshold.yaml new file mode 100644 index 0000000000000..e4bbb12a1cac7 --- /dev/null +++ b/pkg/network/config/testdata/TestDDSystemProbeConfig-HTTPNotificationThreshold.yaml @@ -0,0 +1,2 @@ +network_config: + http_notification_threshold: 100 diff --git a/pkg/network/config/testdata/TestDDSystemProbeConfig-HTTPNotificationThresholdOverLimit.yaml b/pkg/network/config/testdata/TestDDSystemProbeConfig-HTTPNotificationThresholdOverLimit.yaml new file mode 100644 index 0000000000000..c31177ae4a1f3 --- /dev/null +++ b/pkg/network/config/testdata/TestDDSystemProbeConfig-HTTPNotificationThresholdOverLimit.yaml @@ -0,0 +1,2 @@ +network_config: + http_notification_threshold: 2000 diff --git a/pkg/network/dns/snooper.go 
b/pkg/network/dns/snooper.go index 53e55f25c071d..ecc690a9db4be 100644 --- a/pkg/network/dns/snooper.go +++ b/pkg/network/dns/snooper.go @@ -52,6 +52,7 @@ type socketFilterSnooper struct { exit chan struct{} wg sync.WaitGroup collectLocalDNS bool + once sync.Once // cache translation object to avoid allocations translation *translation @@ -132,13 +133,15 @@ func (s *socketFilterSnooper) Start() error { // Close terminates the DNS traffic snooper as well as the underlying socket and the attached filter func (s *socketFilterSnooper) Close() { - close(s.exit) - s.wg.Wait() - s.source.Close() - s.cache.Close() - if s.statKeeper != nil { - s.statKeeper.Close() - } + s.once.Do(func() { + close(s.exit) + s.wg.Wait() + s.source.Close() + s.cache.Close() + if s.statKeeper != nil { + s.statKeeper.Close() + } + }) } // processPacket retrieves DNS information from the received packet data and adds it to diff --git a/pkg/network/ebpf/c/co-re/tracer-fentry.c b/pkg/network/ebpf/c/co-re/tracer-fentry.c index 2bc478ab7ba34..5139f9e9863d8 100644 --- a/pkg/network/ebpf/c/co-re/tracer-fentry.c +++ b/pkg/network/ebpf/c/co-re/tracer-fentry.c @@ -19,6 +19,37 @@ BPF_PERCPU_HASH_MAP(udp6_send_skb_args, u64, u64, 1024) BPF_PERCPU_HASH_MAP(udp_send_skb_args, u64, conn_tuple_t, 1024) +#define RETURN_IF_NOT_IN_SYSPROBE_TASK(prog_name) \ + if (!event_in_task(prog_name)) { \ + return 0; \ + } + +static __always_inline __u32 systemprobe_dev() { + __u64 val = 0; + LOAD_CONSTANT("systemprobe_device", val); + return (__u32)val; +} + +static __always_inline __u32 systemprobe_ino() { + __u64 val = 0; + LOAD_CONSTANT("systemprobe_ino", val); + return (__u32)val; +} + +static __always_inline bool event_in_task(char *prog_name) { + __u32 dev = systemprobe_dev(); + __u32 ino = systemprobe_ino(); + struct bpf_pidns_info ns = {}; + + u64 error = bpf_get_ns_current_pid_tgid(dev, ino, &ns, sizeof(struct bpf_pidns_info)); + + if (error) { + log_debug("%s: err=event originates from outside current fargate 
task\n", prog_name); + } + + return !error; +} + static __always_inline int read_conn_tuple_partial_from_flowi4(conn_tuple_t *t, struct flowi4 *fl4, u64 pid_tgid, metadata_mask_t type) { t->pid = pid_tgid >> 32; t->metadata = type; @@ -105,6 +136,7 @@ static __always_inline int read_conn_tuple_partial_from_flowi6(conn_tuple_t *t, SEC("fexit/tcp_sendmsg") int BPF_PROG(tcp_sendmsg_exit, struct sock *sk, struct msghdr *msg, size_t size, int sent) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/tcp_sendmsg"); if (sent < 0) { log_debug("fexit/tcp_sendmsg: tcp_sendmsg err=%d\n", sent); return 0; @@ -129,6 +161,7 @@ int BPF_PROG(tcp_sendmsg_exit, struct sock *sk, struct msghdr *msg, size_t size, SEC("fexit/tcp_sendpage") int BPF_PROG(tcp_sendpage_exit, struct sock *sk, struct page *page, int offset, size_t size, int flags, int sent) { +RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/tcp_sendpage"); if (sent < 0) { log_debug("fexit/tcp_sendpage: err=%d\n", sent); return 0; @@ -153,6 +186,7 @@ int BPF_PROG(tcp_sendpage_exit, struct sock *sk, struct page *page, int offset, SEC("fexit/udp_sendpage") int BPF_PROG(udp_sendpage_exit, struct sock *sk, struct page *page, int offset, size_t size, int flags, int sent) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/udp_sendpage"); if (sent < 0) { log_debug("fexit/udp_sendpage: err=%d\n", sent); return 0; @@ -171,6 +205,7 @@ int BPF_PROG(udp_sendpage_exit, struct sock *sk, struct page *page, int offset, SEC("fexit/tcp_recvmsg") int BPF_PROG(tcp_recvmsg_exit, struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len, int copied) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/tcp_recvmsg"); if (copied < 0) { // error return 0; } @@ -181,6 +216,7 @@ int BPF_PROG(tcp_recvmsg_exit, struct sock *sk, struct msghdr *msg, size_t len, SEC("fexit/tcp_recvmsg") int BPF_PROG(tcp_recvmsg_exit_pre_5_19_0, struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len, int copied) { + 
RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/tcp_recvmsg"); if (copied < 0) { // error return 0; } @@ -191,6 +227,7 @@ int BPF_PROG(tcp_recvmsg_exit_pre_5_19_0, struct sock *sk, struct msghdr *msg, s SEC("fentry/tcp_close") int BPF_PROG(tcp_close, struct sock *sk, long timeout) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/tcp_close"); conn_tuple_t t = {}; u64 pid_tgid = bpf_get_current_pid_tgid(); @@ -206,12 +243,13 @@ int BPF_PROG(tcp_close, struct sock *sk, long timeout) { } log_debug("fentry/tcp_close: netns: %u, sport: %u, dport: %u\n", t.netns, t.sport, t.dport); - cleanup_conn(&t, sk); + cleanup_conn(ctx, &t, sk); return 0; } SEC("fexit/tcp_close") int BPF_PROG(tcp_close_exit, struct sock *sk, long timeout) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/tcp_close"); flush_conn_close_if_full(ctx); return 0; } @@ -234,6 +272,7 @@ static __always_inline int handle_udp_send(struct sock *sk, int sent) { SEC("kprobe/udp_v6_send_skb") int kprobe__udp_v6_send_skb(struct pt_regs *ctx) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("kprobe/udp_v6_send_skb"); struct sk_buff *skb = (struct sk_buff*) PT_REGS_PARM1(ctx); struct flowi6 *fl6 = (struct flowi6*) PT_REGS_PARM2(ctx); u64 pid_tgid = bpf_get_current_pid_tgid(); @@ -254,11 +293,13 @@ int kprobe__udp_v6_send_skb(struct pt_regs *ctx) { SEC("fexit/udpv6_sendmsg") int BPF_PROG(udpv6_sendmsg_exit, struct sock *sk, struct msghdr *msg, size_t len, int sent) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/udpv6_sendmsg"); return handle_udp_send(sk, sent); } SEC("kprobe/udp_send_skb") int kprobe__udp_send_skb(struct pt_regs *ctx) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("kprobe/udp_send_skb"); struct sk_buff *skb = (struct sk_buff*) PT_REGS_PARM1(ctx); struct flowi4 *fl4 = (struct flowi4*) PT_REGS_PARM2(ctx); u64 pid_tgid = bpf_get_current_pid_tgid(); @@ -279,6 +320,7 @@ int kprobe__udp_send_skb(struct pt_regs *ctx) { SEC("fexit/udp_sendmsg") int BPF_PROG(udp_sendmsg_exit, struct sock *sk, struct msghdr *msg, size_t len, int sent) { + 
RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/udp_sendmsg"); return handle_udp_send(sk, sent); } @@ -301,12 +343,14 @@ static __always_inline int handle_udp_recvmsg_ret() { SEC("fentry/udp_recvmsg") int BPF_PROG(udp_recvmsg, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/udp_recvmsg"); log_debug("fentry/udp_recvmsg: flags: %x\n", flags); return handle_udp_recvmsg(sk, flags); } SEC("fentry/udpv6_recvmsg") int BPF_PROG(udpv6_recvmsg, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/udpv6_recvmsg"); log_debug("fentry/udpv6_recvmsg: flags: %x\n", flags); return handle_udp_recvmsg(sk, flags); } @@ -315,6 +359,7 @@ SEC("fexit/udp_recvmsg") int BPF_PROG(udp_recvmsg_exit, struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len, int copied) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/udp_recvmsg"); return handle_udp_recvmsg_ret(); } @@ -322,6 +367,7 @@ SEC("fexit/udp_recvmsg") int BPF_PROG(udp_recvmsg_exit_pre_5_19_0, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len, int copied) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/udp_recvmsg"); return handle_udp_recvmsg_ret(); } @@ -329,6 +375,7 @@ SEC("fexit/udpv6_recvmsg") int BPF_PROG(udpv6_recvmsg_exit, struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len, int copied) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/udpv6_recvmsg"); return handle_udp_recvmsg_ret(); } @@ -336,26 +383,31 @@ SEC("fexit/udpv6_recvmsg") int BPF_PROG(udpv6_recvmsg_exit_pre_5_19_0, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len, int copied) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/udpv6_recvmsg"); return handle_udp_recvmsg_ret(); } SEC("fentry/skb_free_datagram_locked") int BPF_PROG(skb_free_datagram_locked, struct sock *sk, struct sk_buff *skb) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/skb_free_datagram_locked"); 
return handle_skb_consume_udp(sk, skb, 0); } SEC("fentry/__skb_free_datagram_locked") int BPF_PROG(__skb_free_datagram_locked, struct sock *sk, struct sk_buff *skb, int len) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/__skb_free_datagram_locked"); return handle_skb_consume_udp(sk, skb, len); } SEC("fentry/skb_consume_udp") int BPF_PROG(skb_consume_udp, struct sock *sk, struct sk_buff *skb, int len) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/skb_consume_udp"); return handle_skb_consume_udp(sk, skb, len); } SEC("fentry/tcp_retransmit_skb") int BPF_PROG(tcp_retransmit_skb, struct sock *sk, struct sk_buff *skb, int segs, int err) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/tcp_retransmit_skb"); log_debug("fexntry/tcp_retransmit\n"); u64 tid = bpf_get_current_pid_tgid(); tcp_retransmit_skb_args_t args = {}; @@ -371,6 +423,7 @@ int BPF_PROG(tcp_retransmit_skb, struct sock *sk, struct sk_buff *skb, int segs, SEC("fexit/tcp_retransmit_skb") int BPF_PROG(tcp_retransmit_skb_exit, struct sock *sk, struct sk_buff *skb, int segs, int err) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/tcp_retransmit_skb"); log_debug("fexit/tcp_retransmit\n"); u64 tid = bpf_get_current_pid_tgid(); if (err < 0) { @@ -394,6 +447,7 @@ int BPF_PROG(tcp_retransmit_skb_exit, struct sock *sk, struct sk_buff *skb, int SEC("fentry/tcp_set_state") int BPF_PROG(tcp_set_state, struct sock *sk, int state) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/tcp_set_state"); // For now we're tracking only TCP_ESTABLISHED if (state != TCP_ESTABLISHED) { return 0; @@ -413,6 +467,7 @@ int BPF_PROG(tcp_set_state, struct sock *sk, int state) { SEC("fentry/tcp_connect") int BPF_PROG(tcp_connect, struct sock *sk) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/tcp_connect"); u64 pid_tgid = bpf_get_current_pid_tgid(); log_debug("fentry/tcp_connect: tgid: %u, pid: %u\n", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); @@ -423,6 +478,7 @@ int BPF_PROG(tcp_connect, struct sock *sk) { SEC("fentry/tcp_finish_connect") int BPF_PROG(tcp_finish_connect, 
struct sock *sk, struct sk_buff *skb, int rc) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/tcp_finish_connect"); u64 *pid_tgid_p = bpf_map_lookup_elem(&tcp_ongoing_connect_pid, &sk); if (!pid_tgid_p) { return 0; @@ -447,6 +503,7 @@ int BPF_PROG(tcp_finish_connect, struct sock *sk, struct sk_buff *skb, int rc) { SEC("fexit/inet_csk_accept") int BPF_PROG(inet_csk_accept_exit, struct sock *_sk, int flags, int *err, bool kern, struct sock *sk) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/inet_csk_accept"); if (sk == NULL) { return 0; } @@ -471,6 +528,7 @@ int BPF_PROG(inet_csk_accept_exit, struct sock *_sk, int flags, int *err, bool k SEC("fentry/inet_csk_listen_stop") int BPF_PROG(inet_csk_listen_stop_enter, struct sock *sk) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/inet_csk_listen_stop"); __u16 lport = read_sport(sk); if (lport == 0) { log_debug("ERR(inet_csk_listen_stop): lport is 0 \n"); @@ -485,14 +543,14 @@ int BPF_PROG(inet_csk_listen_stop_enter, struct sock *sk) { return 0; } -static __always_inline int handle_udp_destroy_sock(struct sock *sk) { +static __always_inline int handle_udp_destroy_sock(void *ctx, struct sock *sk) { conn_tuple_t tup = {}; u64 pid_tgid = bpf_get_current_pid_tgid(); int valid_tuple = read_conn_tuple(&tup, sk, pid_tgid, CONN_TYPE_UDP); __u16 lport = 0; if (valid_tuple) { - cleanup_conn(&tup, sk); + cleanup_conn(ctx, &tup, sk); lport = tup.sport; } else { // get the port for the current sock @@ -519,22 +577,26 @@ static __always_inline int handle_udp_destroy_sock(struct sock *sk) { SEC("fentry/udp_destroy_sock") int BPF_PROG(udp_destroy_sock, struct sock *sk) { - return handle_udp_destroy_sock(sk); + RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/udp_destroy_sock"); + return handle_udp_destroy_sock(ctx, sk); } SEC("fentry/udpv6_destroy_sock") int BPF_PROG(udpv6_destroy_sock, struct sock *sk) { - return handle_udp_destroy_sock(sk); + RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/udpv6_destroy_sock"); + return handle_udp_destroy_sock(ctx, sk); } 
SEC("fexit/udp_destroy_sock") int BPF_PROG(udp_destroy_sock_exit, struct sock *sk) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/udp_destroy_sock"); flush_conn_close_if_full(ctx); return 0; } SEC("fexit/udpv6_destroy_sock") int BPF_PROG(udpv6_destroy_sock_exit, struct sock *sk) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/udpv6_destroy_sock"); flush_conn_close_if_full(ctx); return 0; } @@ -582,12 +644,14 @@ static __always_inline int sys_exit_bind(struct socket *sock, struct sockaddr *a SEC("fexit/inet_bind") int BPF_PROG(inet_bind_exit, struct socket *sock, struct sockaddr *uaddr, int addr_len, int rc) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/inet_bind"); log_debug("fexit/inet_bind: rc=%d\n", rc); return sys_exit_bind(sock, uaddr, rc); } SEC("fexit/inet6_bind") int BPF_PROG(inet6_bind_exit, struct socket *sock, struct sockaddr *uaddr, int addr_len, int rc) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/inet6_bind"); log_debug("fexit/inet6_bind: rc=%d\n", rc); return sys_exit_bind(sock, uaddr, rc); } @@ -597,6 +661,7 @@ int BPF_PROG(inet6_bind_exit, struct socket *sock, struct sockaddr *uaddr, int a // * an index of struct sock* to pid_fd_t; SEC("fexit/sockfd_lookup_light") int BPF_PROG(sockfd_lookup_light_exit, int fd, int *err, int *fput_needed, struct socket *socket) { + RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/sockfd_lookup_light"); u64 pid_tgid = bpf_get_current_pid_tgid(); // Check if have already a map entry for this pid_fd_t // TODO: This lookup eliminates *4* map operations for existing entries diff --git a/pkg/network/ebpf/c/protocols/classification/dispatcher-helpers.h b/pkg/network/ebpf/c/protocols/classification/dispatcher-helpers.h index 9950824144e68..43dfc75f7744b 100644 --- a/pkg/network/ebpf/c/protocols/classification/dispatcher-helpers.h +++ b/pkg/network/ebpf/c/protocols/classification/dispatcher-helpers.h @@ -88,7 +88,6 @@ static __always_inline void protocol_dispatcher_entrypoint(struct __sk_buff *skb // should never happen, but it is required by the eBPF 
verifier return; } - // TODO: consider adding early return if `is_layer_known(stack, LAYER_ENCRYPTION)` protocol_t cur_fragment_protocol = get_protocol_from_stack(stack, LAYER_APPLICATION); diff --git a/pkg/network/ebpf/c/protocols/classification/routing.h b/pkg/network/ebpf/c/protocols/classification/routing.h index cd9106acab03a..27d70da6f50d1 100644 --- a/pkg/network/ebpf/c/protocols/classification/routing.h +++ b/pkg/network/ebpf/c/protocols/classification/routing.h @@ -6,6 +6,12 @@ #include "protocols/classification/stack-helpers.h" #include "protocols/classification/routing-helpers.h" +// This entry point is needed to bypass a memory limit on socket filters. +// There is a limitation on number of instructions can be attached to a socket filter, +// as we classify more protocols, we reached that limit, thus we workaround it +// by using tail call. +BPF_PROG_ARRAY(classification_progs, CLASSIFICATION_PROG_MAX) + // This function essentially encodes all routing aspects of tail-calls. // // For example, if this function gets called from `CLASSIFICATION_QUEUES_PROG` diff --git a/pkg/network/ebpf/c/protocols/classification/tracer-maps.h b/pkg/network/ebpf/c/protocols/classification/tracer-maps.h deleted file mode 100644 index 97934bc4f5bf6..0000000000000 --- a/pkg/network/ebpf/c/protocols/classification/tracer-maps.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef __PROTOCOL_CLASSIFICATION_TRACER_MAPS_H -#define __PROTOCOL_CLASSIFICATION_TRACER_MAPS_H - -#include "conn_tuple.h" -#include "map-defs.h" - -#include "protocols/classification/shared-tracer-maps.h" - -// Maps skb connection tuple to socket connection tuple. -// On ingress, skb connection tuple is pre NAT, and socket connection tuple is post NAT, and on egress, the opposite. -// We track the lifecycle of socket using tracepoint net/net_dev_queue. 
-// Some protocol can be classified in a single direction (for example HTTP/2 can be classified only by the first 24 bytes -// sent on the hand shake), and if we have NAT, then the conn tuple we extract from sk_buff will be different than the -// one we extract from the sock object, and then we are not able to correctly classify those protocols. -// To overcome those problems, we save two maps that translates from conn tuple of sk_buff to conn tuple of sock* and vice -// versa (the vice versa is used for cleanup purposes). -BPF_HASH_MAP(conn_tuple_to_socket_skb_conn_tuple, conn_tuple_t, conn_tuple_t, 0) - -// This entry point is needed to bypass a memory limit on socket filters. -// There is a limitation on number of instructions can be attached to a socket filter, -// as we classify more protocols, we reached that limit, thus we workaround it -// by using tail call. -BPF_PROG_ARRAY(classification_progs, CLASSIFICATION_PROG_MAX) - -// Map to hold conn_tuple_t parameter for tcp_close calls -// to be used in kretprobe/tcp_close. -BPF_HASH_MAP(tcp_close_args, __u64, conn_tuple_t, 1024) - -// This program array is needed to bypass a memory limit on socket filters. -// There is a limitation on number of instructions can be attached to a socket filter, -// as we dispatching more protocols, we reached that limit, thus we workaround it -// by using tail call. 
-BPF_PROG_ARRAY(tcp_close_progs, 1) - -#endif diff --git a/pkg/network/ebpf/c/protocols/http/http.h b/pkg/network/ebpf/c/protocols/http/http.h index cfd35dc3bde9f..09e00bfd03352 100644 --- a/pkg/network/ebpf/c/protocols/http/http.h +++ b/pkg/network/ebpf/c/protocols/http/http.h @@ -164,10 +164,14 @@ static __always_inline bool http_allow_packet(http_transaction_t *http, struct _ return false; } - // if payload data is empty or if this is an encrypted packet, we only - // process it if the packet represents a TCP termination + protocol_stack_t *stack = get_protocol_stack(&http->tup); + if (!stack) { + return false; + } bool empty_payload = skb_info->data_off == skb->len; - if (empty_payload || http->tup.sport == HTTPS_PORT || http->tup.dport == HTTPS_PORT) { + if (empty_payload || is_fully_classified(stack) || is_protocol_layer_known(stack, LAYER_ENCRYPTION)) { + // if the payload data is empty or encrypted packet, we only + // process it if the packet represents a TCP termination return skb_info->tcp_flags&(TCPHDR_FIN|TCPHDR_RST); } diff --git a/pkg/network/ebpf/c/protocols/tls/https.h b/pkg/network/ebpf/c/protocols/tls/https.h index cce61ccd1afb8..fd8107dbbd074 100644 --- a/pkg/network/ebpf/c/protocols/tls/https.h +++ b/pkg/network/ebpf/c/protocols/tls/https.h @@ -27,8 +27,6 @@ #include "protocols/tls/tags-types.h" #include "protocols/tls/go-tls-types.h" -#define HTTPS_PORT 443 - /* this function is called by all TLS hookpoints (OpenSSL, GnuTLS and GoTLS) and */ /* it's used for classify the subset of protocols that is supported by `classify_protocol_for_dispatcher` */ static __always_inline void classify_decrypted_payload(conn_tuple_t *t, void *buffer, size_t len) { diff --git a/pkg/network/ebpf/c/tracer.c b/pkg/network/ebpf/c/tracer.c index 504d8d05651ac..14bdb72de596f 100644 --- a/pkg/network/ebpf/c/tracer.c +++ b/pkg/network/ebpf/c/tracer.c @@ -13,9 +13,9 @@ #include "skb.h" #include "sockfd.h" #include "tracer/events.h" +#include "tracer/maps.h" #include 
"tracer/port.h" #include "tracer/tcp_recv.h" -#include "protocols/classification/tracer-maps.h" #include "protocols/classification/protocol-classification.h" SEC("socket/classifier_entry") @@ -201,7 +201,7 @@ int kprobe__tcp_close(struct pt_regs *ctx) { } log_debug("kprobe/tcp_close: netns: %u, sport: %u, dport: %u\n", t.netns, t.sport, t.dport); - cleanup_conn(&t, sk); + cleanup_conn(ctx, &t, sk); // If protocol classification is disabled, then we don't have kretprobe__tcp_close_clean_protocols hook // so, there is no one to use the map and clean it. @@ -360,8 +360,25 @@ int kprobe__ip6_make_skb__pre_4_7_0(struct pt_regs *ctx) { return 0; } +SEC("kprobe/ip6_make_skb") +int kprobe__ip6_make_skb__pre_5_18_0(struct pt_regs *ctx) { + struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx); + size_t len = (size_t)PT_REGS_PARM4(ctx); + struct flowi6 *fl6 = (struct flowi6 *)PT_REGS_PARM7(ctx); + + u64 pid_tgid = bpf_get_current_pid_tgid(); + ip_make_skb_args_t args = {}; + bpf_probe_read_kernel_with_telemetry(&args.sk, sizeof(args.sk), &sk); + bpf_probe_read_kernel_with_telemetry(&args.len, sizeof(args.len), &len); + bpf_probe_read_kernel_with_telemetry(&args.fl6, sizeof(args.fl6), &fl6); + bpf_map_update_with_telemetry(ip_make_skb_args, &pid_tgid, &args, BPF_ANY); + return 0; +} + #endif // COMPILE_CORE || COMPILE_PREBUILT +#if defined(COMPILE_RUNTIME) || defined(COMPILE_CORE) + SEC("kprobe/ip6_make_skb") int kprobe__ip6_make_skb(struct pt_regs *ctx) { struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx); @@ -370,7 +387,10 @@ int kprobe__ip6_make_skb(struct pt_regs *ctx) { // commit: https://github.com/torvalds/linux/commit/f37a4cc6bb0ba08c2d9fd7d18a1da87161cbb7f9 struct inet_cork_full *cork_full = (struct inet_cork_full *)PT_REGS_PARM9(ctx); struct flowi6 *fl6 = &cork_full->fl.u.ip6; -#elif !defined(COMPILE_RUNTIME) || LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) +#elif defined(COMPILE_CORE) + struct inet_cork_full *cork_full = (struct inet_cork_full *)PT_REGS_PARM9(ctx); 
+ struct flowi6 *fl6 = (struct flowi6 *)__builtin_preserve_access_index(&cork_full->fl.u.ip6); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) // commit: https://github.com/torvalds/linux/commit/26879da58711aa604a1b866cbeedd7e0f78f90ad // changed the arguments to ip6_make_skb and introduced the struct ipcm6_cookie struct flowi6 *fl6 = (struct flowi6 *)PT_REGS_PARM7(ctx); @@ -384,10 +404,11 @@ int kprobe__ip6_make_skb(struct pt_regs *ctx) { bpf_probe_read_kernel_with_telemetry(&args.len, sizeof(args.len), &len); bpf_probe_read_kernel_with_telemetry(&args.fl6, sizeof(args.fl6), &fl6); bpf_map_update_with_telemetry(ip_make_skb_args, &pid_tgid, &args, BPF_ANY); - return 0; } +#endif // COMPILE_RUNTIME || COMPILE_CORE + SEC("kretprobe/ip6_make_skb") int kretprobe__ip6_make_skb(struct pt_regs *ctx) { u64 pid_tgid = bpf_get_current_pid_tgid(); @@ -933,14 +954,14 @@ int kprobe__inet_csk_listen_stop(struct pt_regs *ctx) { return 0; } -static __always_inline int handle_udp_destroy_sock(struct sock *skp) { +static __always_inline int handle_udp_destroy_sock(void *ctx, struct sock *skp) { conn_tuple_t tup = {}; u64 pid_tgid = bpf_get_current_pid_tgid(); int valid_tuple = read_conn_tuple(&tup, skp, pid_tgid, CONN_TYPE_UDP); __u16 lport = 0; if (valid_tuple) { - cleanup_conn(&tup, skp); + cleanup_conn(ctx, &tup, skp); lport = tup.sport; } else { lport = read_sport(skp); @@ -964,13 +985,13 @@ static __always_inline int handle_udp_destroy_sock(struct sock *skp) { SEC("kprobe/udp_destroy_sock") int kprobe__udp_destroy_sock(struct pt_regs *ctx) { struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx); - return handle_udp_destroy_sock(sk); + return handle_udp_destroy_sock(ctx, sk); } SEC("kprobe/udpv6_destroy_sock") int kprobe__udpv6_destroy_sock(struct pt_regs *ctx) { struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx); - return handle_udp_destroy_sock(sk); + return handle_udp_destroy_sock(ctx, sk); } SEC("kretprobe/udp_destroy_sock") diff --git 
a/pkg/network/ebpf/c/tracer/events.h b/pkg/network/ebpf/c/tracer/events.h index a2f1f353bc0f5..5320737f90abc 100644 --- a/pkg/network/ebpf/c/tracer/events.h +++ b/pkg/network/ebpf/c/tracer/events.h @@ -10,7 +10,6 @@ #include "tracer/stats.h" #include "tracer/telemetry.h" #include "cookie.h" -#include "protocols/classification/tracer-maps.h" #include "ip.h" #include "port_range.h" @@ -35,7 +34,7 @@ static __always_inline void clean_protocol_classification(conn_tuple_t *tup) { bpf_map_delete_elem(&conn_tuple_to_socket_skb_conn_tuple, &conn_tuple); } -static __always_inline void cleanup_conn(conn_tuple_t *tup, struct sock *sk) { +static __always_inline void cleanup_conn(void *ctx, conn_tuple_t *tup, struct sock *sk) { u32 cpu = bpf_get_smp_processor_id(); // Will hold the full connection data to send through the perf buffer @@ -103,12 +102,16 @@ static __always_inline void cleanup_conn(conn_tuple_t *tup, struct sock *sk) { } // If we hit this section it means we had one or more interleaved tcp_close calls. - // This could result in a missed tcp_close event, so we track it using our telemetry map. + // We send the connection outside of a batch anyway. This is likely not as + // frequent of a case to cause performance issues and avoid cases where + // we drop whole connections, which impacts things USM connection matching. 
+ bpf_perf_event_output(ctx, &conn_close_event, cpu, &conn, sizeof(conn)); + if (is_tcp) { - increment_telemetry_count(missed_tcp_close); + increment_telemetry_count(unbatched_tcp_close); } if (is_udp) { - increment_telemetry_count(missed_udp_close); + increment_telemetry_count(unbatched_udp_close); } } diff --git a/pkg/network/ebpf/c/tracer/maps.h b/pkg/network/ebpf/c/tracer/maps.h index 50cd48ae451f2..020a65af1e00b 100644 --- a/pkg/network/ebpf/c/tracer/maps.h +++ b/pkg/network/ebpf/c/tracer/maps.h @@ -105,4 +105,24 @@ BPF_HASH_MAP(pending_tcp_retransmit_skb, __u64, tcp_retransmit_skb_args_t, 8192) // corresponding kretprobes BPF_HASH_MAP(ip_make_skb_args, __u64, ip_make_skb_args_t, 1024) +// Maps skb connection tuple to socket connection tuple. +// On ingress, skb connection tuple is pre NAT, and socket connection tuple is post NAT, and on egress, the opposite. +// We track the lifecycle of socket using tracepoint net/net_dev_queue. +// Some protocol can be classified in a single direction (for example HTTP/2 can be classified only by the first 24 bytes +// sent on the hand shake), and if we have NAT, then the conn tuple we extract from sk_buff will be different than the +// one we extract from the sock object, and then we are not able to correctly classify those protocols. +// To overcome those problems, we save two maps that translates from conn tuple of sk_buff to conn tuple of sock* and vice +// versa (the vice versa is used for cleanup purposes). +BPF_HASH_MAP(conn_tuple_to_socket_skb_conn_tuple, conn_tuple_t, conn_tuple_t, 0) + +// Map to hold conn_tuple_t parameter for tcp_close calls +// to be used in kretprobe/tcp_close. +BPF_HASH_MAP(tcp_close_args, __u64, conn_tuple_t, 1024) + +// This program array is needed to bypass a memory limit on socket filters. +// There is a limitation on number of instructions can be attached to a socket filter, +// as we dispatching more protocols, we reached that limit, thus we workaround it +// by using tail call. 
+BPF_PROG_ARRAY(tcp_close_progs, 1) + #endif diff --git a/pkg/network/ebpf/c/tracer/stats.h b/pkg/network/ebpf/c/tracer/stats.h index 416fa924664f8..ef16fcb0846ad 100644 --- a/pkg/network/ebpf/c/tracer/stats.h +++ b/pkg/network/ebpf/c/tracer/stats.h @@ -11,8 +11,8 @@ #include "cookie.h" #include "sock.h" #include "port_range.h" +#include "protocols/classification/shared-tracer-maps.h" #include "protocols/classification/stack-helpers.h" -#include "protocols/classification/tracer-maps.h" #include "protocols/tls/tags-types.h" #include "ip.h" #include "skb.h" diff --git a/pkg/network/ebpf/c/tracer/telemetry.h b/pkg/network/ebpf/c/tracer/telemetry.h index 32471409ed169..3686847f8bcee 100644 --- a/pkg/network/ebpf/c/tracer/telemetry.h +++ b/pkg/network/ebpf/c/tracer/telemetry.h @@ -17,8 +17,8 @@ enum telemetry_counter { tcp_failed_connect, - missed_tcp_close, - missed_udp_close, + unbatched_tcp_close, + unbatched_udp_close, udp_send_processed, udp_send_missed, udp_dropped_conns, @@ -36,11 +36,11 @@ static __always_inline void increment_telemetry_count(enum telemetry_counter cou case tcp_failed_connect: __sync_fetch_and_add(&val->tcp_failed_connect, 1); break; - case missed_tcp_close: - __sync_fetch_and_add(&val->missed_tcp_close, 1); + case unbatched_tcp_close: + __sync_fetch_and_add(&val->unbatched_tcp_close, 1); break; - case missed_udp_close: - __sync_fetch_and_add(&val->missed_udp_close, 1); + case unbatched_udp_close: + __sync_fetch_and_add(&val->unbatched_udp_close, 1); break; case udp_send_processed: __sync_fetch_and_add(&val->udp_sends_processed, 1); diff --git a/pkg/network/ebpf/c/tracer/tracer.h b/pkg/network/ebpf/c/tracer/tracer.h index df9d10c724f2e..ab548d144cd27 100644 --- a/pkg/network/ebpf/c/tracer/tracer.h +++ b/pkg/network/ebpf/c/tracer/tracer.h @@ -43,8 +43,6 @@ typedef struct { __u64 sent_packets; __u64 recv_packets; __u8 direction; - // keep the conn_tags u8 to keep the struct slim - __u8 conn_tags; protocol_stack_t protocol_stack; } conn_stats_ts_t; 
@@ -93,8 +91,8 @@ typedef struct { typedef struct { __u64 tcp_failed_connect; __u64 tcp_sent_miscounts; - __u64 missed_tcp_close; - __u64 missed_udp_close; + __u64 unbatched_tcp_close; + __u64 unbatched_udp_close; __u64 udp_sends_processed; __u64 udp_sends_missed; __u64 udp_dropped_conns; diff --git a/pkg/network/ebpf/kprobe_types.go b/pkg/network/ebpf/kprobe_types.go index 42fa6087dfe35..d6221a3677ae8 100644 --- a/pkg/network/ebpf/kprobe_types.go +++ b/pkg/network/ebpf/kprobe_types.go @@ -51,6 +51,8 @@ const ( const BatchSize = C.CONN_CLOSED_BATCH_SIZE const SizeofBatch = C.sizeof_batch_t +const SizeofConn = C.sizeof_conn_t + type ClassificationProgram = uint32 const ( diff --git a/pkg/network/ebpf/kprobe_types_linux.go b/pkg/network/ebpf/kprobe_types_linux.go index d4e180fdade18..a711b6f3366ed 100644 --- a/pkg/network/ebpf/kprobe_types_linux.go +++ b/pkg/network/ebpf/kprobe_types_linux.go @@ -30,9 +30,8 @@ type ConnStats struct { Sent_packets uint64 Recv_packets uint64 Direction uint8 - Conn_tags uint8 Protocol_stack ProtocolStack - Pad_cgo_0 [2]byte + Pad_cgo_0 [3]byte } type Conn struct { Tup ConnTuple @@ -50,8 +49,8 @@ type Batch struct { type Telemetry struct { Tcp_failed_connect uint64 Tcp_sent_miscounts uint64 - Missed_tcp_close uint64 - Missed_udp_close uint64 + Unbatched_tcp_close uint64 + Unbatched_udp_close uint64 Udp_sends_processed uint64 Udp_sends_missed uint64 Udp_dropped_conns uint64 @@ -102,6 +101,8 @@ const ( const BatchSize = 0x4 const SizeofBatch = 0x1f0 +const SizeofConn = 0x78 + type ClassificationProgram = uint32 const ( diff --git a/pkg/network/ebpf/probes/probes.go b/pkg/network/ebpf/probes/probes.go index 7bd1460c7f0ad..d5abe7b41312d 100644 --- a/pkg/network/ebpf/probes/probes.go +++ b/pkg/network/ebpf/probes/probes.go @@ -97,6 +97,8 @@ const ( IP6MakeSkbReturn ProbeFuncName = "kretprobe__ip6_make_skb" // IP6MakeSkbPre470 traces ip6_make_skb on kernel versions < 4.7 IP6MakeSkbPre470 ProbeFuncName = "kprobe__ip6_make_skb__pre_4_7_0" + // 
IP6MakeSkbPre5180 traces ip6_make_skb on kernel versions < 5.18 + IP6MakeSkbPre5180 ProbeFuncName = "kprobe__ip6_make_skb__pre_5_18_0" // UDPRecvMsg traces the udp_recvmsg() system call UDPRecvMsg ProbeFuncName = "kprobe__udp_recvmsg" diff --git a/pkg/network/protocols/grpc/monitor_test.go b/pkg/network/protocols/grpc/monitor_test.go index fc86e9129fb48..b64fc5dc048d0 100644 --- a/pkg/network/protocols/grpc/monitor_test.go +++ b/pkg/network/protocols/grpc/monitor_test.go @@ -14,6 +14,7 @@ import ( "testing" "time" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/stretchr/testify/require" @@ -30,6 +31,10 @@ const ( ) func TestGRPCScenarios(t *testing.T) { + ebpftest.TestBuildModes(t, []ebpftest.BuildMode{ebpftest.Prebuilt, ebpftest.RuntimeCompiled, ebpftest.CORE}, "", testGRPCScenarios) +} + +func testGRPCScenarios(t *testing.T) { cfg := config.New() cfg.EnableHTTPMonitoring = true cfg.EnableHTTP2Monitoring = true diff --git a/pkg/network/protocols/http/incomplete_stats.go b/pkg/network/protocols/http/incomplete_stats.go index deeed5747d7a4..f9271dfd2a261 100644 --- a/pkg/network/protocols/http/incomplete_stats.go +++ b/pkg/network/protocols/http/incomplete_stats.go @@ -42,10 +42,11 @@ const defaultMinAge = 30 * time.Second // request segment at "t0" with response segment "t3". This is why we buffer data here for 30 seconds // and then sort all events by their timestamps before joining them. 
type incompleteBuffer struct { - data map[types.ConnectionKey]*txParts - maxEntries int - telemetry *Telemetry - minAgeNano int64 + data map[types.ConnectionKey]*txParts + totalEntries int + maxEntries int + telemetry *Telemetry + minAgeNano int64 } type txParts struct { @@ -61,15 +62,26 @@ func newTXParts() *txParts { } func newIncompleteBuffer(c *config.Config, telemetry *Telemetry) *incompleteBuffer { + // Only set aside a fraction of MaxHTTPBuffered for incomplete data + // as this should only be used rarely (as described in the example above). + // If our telemetry indicates that this buffer is filling up often, we need + // to better understand what is going on and reassess our approach + maxEntries := c.MaxHTTPStatsBuffered / 10 + return &incompleteBuffer{ data: make(map[types.ConnectionKey]*txParts), - maxEntries: c.MaxHTTPStatsBuffered, + maxEntries: maxEntries, telemetry: telemetry, minAgeNano: defaultMinAge.Nanoseconds(), } } func (b *incompleteBuffer) Add(tx HttpTX) { + if b.totalEntries >= b.maxEntries { + b.telemetry.dropped.Add(1) + return + } + connTuple := tx.ConnTuple() key := types.ConnectionKey{ SrcIPHigh: connTuple.SrcIPHigh, @@ -79,14 +91,11 @@ func (b *incompleteBuffer) Add(tx HttpTX) { parts, ok := b.data[key] if !ok { - if len(b.data) >= b.maxEntries { - b.telemetry.dropped.Add(1) - return - } - parts = newTXParts() b.data[key] = parts } + b.totalEntries++ + b.telemetry.newIncomplete.Add(1) // copy underlying httpTX value. 
this is now needed because these objects are // now coming directly from pooled perf records @@ -114,7 +123,9 @@ func (b *incompleteBuffer) Flush(now time.Time) []HttpTX { nowUnix = now.UnixNano() ) + b.telemetry.totalIncomplete.Add(int64(b.totalEntries)) b.data = make(map[types.ConnectionKey]*txParts) + b.totalEntries = 0 for key, parts := range previous { // TODO: in this loop we're sorting all transactions at once, but we could also // consider sorting data during insertion time (using a tree-like structure, for example) @@ -147,12 +158,14 @@ func (b *incompleteBuffer) Flush(now time.Time) []HttpTX { parts := newTXParts() parts.requests = append(parts.requests, keep...) b.data[key] = parts + b.totalEntries += len(parts.requests) break } i++ } } + b.telemetry.joinedIncomplete.Add(int64(len(joined))) return joined } diff --git a/pkg/network/protocols/http/incomplete_stats_test.go b/pkg/network/protocols/http/incomplete_stats_test.go index dd6f05382d4ae..494f0bf3e9f4f 100644 --- a/pkg/network/protocols/http/incomplete_stats_test.go +++ b/pkg/network/protocols/http/incomplete_stats_test.go @@ -8,10 +8,11 @@ package http import ( - "golang.org/x/net/http2/hpack" "testing" "time" + "golang.org/x/net/http2/hpack" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -76,6 +77,38 @@ func TestOrphanEntries(t *testing.T) { }) } +func TestBufferLimit(t *testing.T) { + now := time.Now() + tel, err := NewTelemetry() + require.NoError(t, err) + + buffer := newIncompleteBuffer(config.New(), tel) + + // Attempt to insert more data than allowed + // Since all incomplete parts share the same tuple, this will generate + // *one* map key, with many "parts" appended to it + // + // We're asserting here that our buffer counts the total number of entries + // included in the nested structures, and not only the map keys + for i := 0; i < 2*buffer.maxEntries; i++ { + request := &EbpfHttpTx{ + Request_fragment: requestFragment([]byte("GET /foo/bar")), + 
Request_started: uint64(now.UnixNano()), + } + request.Tup.Sport = 60000 + buffer.Add(request) + } + + // Count total entries + total := 0 + for _, parts := range buffer.data { + total += len(parts.requests) + len(parts.responses) + } + + // Assert that buffer honored the max number of entries allowed + assert.Equal(t, buffer.maxEntries, total) +} + func TestHTTP2Path(t *testing.T) { t.Run("validate http2 path backslash", func(t *testing.T) { // create a buffer to store the encoded data diff --git a/pkg/network/protocols/http/telemetry.go b/pkg/network/protocols/http/telemetry.go index 896f8e08182a2..b6aa655457de8 100644 --- a/pkg/network/protocols/http/telemetry.go +++ b/pkg/network/protocols/http/telemetry.go @@ -24,6 +24,10 @@ type Telemetry struct { rejected *libtelemetry.Metric // this happens when an user-defined reject-filter matches a request malformed *libtelemetry.Metric // this happens when the request doesn't have the expected format aggregations *libtelemetry.Metric + + newIncomplete *libtelemetry.Metric + totalIncomplete *libtelemetry.Metric + joinedIncomplete *libtelemetry.Metric } func NewTelemetry() (*Telemetry, error) { @@ -42,6 +46,11 @@ func NewTelemetry() (*Telemetry, error) { hits5XX: metricGroup.NewMetric("hits5xx"), aggregations: metricGroup.NewMetric("aggregations"), + // metrics from `incompleteBuffer` + newIncomplete: metricGroup.NewMetric("new_incomplete"), + totalIncomplete: metricGroup.NewMetric("total_incomplete"), + joinedIncomplete: metricGroup.NewMetric("joined_incomplete"), + // these metrics are also exported as statsd metrics totalHits: metricGroup.NewMetric("total_hits", libtelemetry.OptStatsd), dropped: metricGroup.NewMetric("dropped", libtelemetry.OptStatsd), @@ -83,11 +92,14 @@ func (t *Telemetry) Log() { rejected := t.rejected.Delta() malformed := t.malformed.Delta() aggregations := t.aggregations.Delta() + newIncomplete := t.newIncomplete.Delta() + joinedIncomplete := t.joinedIncomplete.Delta() + totalIncomplete := 
t.totalIncomplete.Delta() elapsed := now - t.LastCheck.Load() t.LastCheck.Store(now) log.Debugf( - "http stats summary: requests_processed=%d(%.2f/s) requests_dropped=%d(%.2f/s) requests_rejected=%d(%.2f/s) requests_malformed=%d(%.2f/s) aggregations=%d", + "http stats summary: requests_processed=%d(%.2f/s) requests_dropped=%d(%.2f/s) requests_rejected=%d(%.2f/s) requests_malformed=%d(%.2f/s) incomplete_parts=%d(%.2f/s) incomplete_parts_joined=%d(%.2f/s) incomplete_parts_accumulated=%d aggregations=%d", totalRequests, float64(totalRequests)/float64(elapsed), dropped, @@ -96,6 +108,11 @@ func (t *Telemetry) Log() { float64(rejected)/float64(elapsed), malformed, float64(malformed)/float64(elapsed), + newIncomplete, + float64(newIncomplete)/float64(elapsed), + joinedIncomplete, + float64(joinedIncomplete)/float64(elapsed), + totalIncomplete, aggregations, ) } diff --git a/pkg/network/protocols/kafka/server.go b/pkg/network/protocols/kafka/server.go index 06f96bc6dbc0f..67fa3975a211d 100644 --- a/pkg/network/protocols/kafka/server.go +++ b/pkg/network/protocols/kafka/server.go @@ -22,5 +22,5 @@ func RunServer(t testing.TB, serverAddr, serverPort string) error { t.Helper() dir, _ := testutil.CurDir() - return protocolsUtils.RunDockerServer(t, "kafka", dir+"/testdata/docker-compose.yml", env, regexp.MustCompile(".*started \\(kafka.server.KafkaServer\\).*"), 2*time.Minute) + return protocolsUtils.RunDockerServer(t, "kafka", dir+"/testdata/docker-compose.yml", env, regexp.MustCompile(".*started \\(kafka.server.KafkaServer\\).*"), 3*time.Minute) } diff --git a/pkg/network/route_cache.go b/pkg/network/route_cache.go index e4ce4de5790fa..9fd7ebb093c20 100644 --- a/pkg/network/route_cache.go +++ b/pkg/network/route_cache.go @@ -59,6 +59,7 @@ var routeCacheTelemetry = struct { misses telemetry.Counter lookups telemetry.Counter expires telemetry.Counter + evicts telemetry.Counter netlinkLookups telemetry.Counter netlinkErrors telemetry.Counter @@ -73,6 +74,7 @@ var 
routeCacheTelemetry = struct { telemetry.NewCounter(routeCacheTelemetryModuleName, "misses", []string{}, "Counter measuring the number of route cache misses"), telemetry.NewCounter(routeCacheTelemetryModuleName, "lookups", []string{}, "Counter measuring the number of route cache lookups"), telemetry.NewCounter(routeCacheTelemetryModuleName, "expires", []string{}, "Counter measuring the number of route cache expirations"), + telemetry.NewCounter(routeCacheTelemetryModuleName, "evicts", []string{}, "Counter measuring the number of route cache evicts"), telemetry.NewCounter(routerTelemetryModuleName, "netlink_lookups", []string{}, "Counter measuring the number of netlink lookups"), telemetry.NewCounter(routerTelemetryModuleName, "netlink_errors", []string{}, "Counter measuring the number of netlink errors"), @@ -113,6 +115,10 @@ func newRouteCache(size int, router Router, ttl time.Duration) *routeCache { ttl: ttl, } + rc.cache.OnEvicted = func(_ lru.Key, _ interface{}) { + routeCacheTelemetry.evicts.Inc() + } + return rc } @@ -126,7 +132,10 @@ func (c *routeCache) Close() { func (c *routeCache) Get(source, dest util.Address, netns uint32) (Route, bool) { c.Lock() - defer c.Unlock() + defer func() { + routeCacheTelemetry.size.Set(float64(c.cache.Len())) + c.Unlock() + }() routeCacheTelemetry.lookups.Inc() k := newRouteKey(source, dest, netns) @@ -137,7 +146,6 @@ func (c *routeCache) Get(source, dest util.Address, netns uint32) (Route, bool) routeCacheTelemetry.expires.Inc() c.cache.Remove(k) - routeCacheTelemetry.size.Dec() } else { routeCacheTelemetry.misses.Inc() } @@ -149,7 +157,6 @@ func (c *routeCache) Get(source, dest util.Address, netns uint32) (Route, bool) } c.cache.Add(k, entry) - routeCacheTelemetry.size.Inc() return r, true } diff --git a/pkg/network/state.go b/pkg/network/state.go index 9f98cb47b9414..58995b5d55035 100644 --- a/pkg/network/state.go +++ b/pkg/network/state.go @@ -183,7 +183,7 @@ type networkState struct { // Network state configuration 
clientExpiry time.Duration - maxClosedConns int + maxClosedConns uint32 maxClientStats int maxDNSStats int maxHTTPStats int @@ -193,7 +193,7 @@ type networkState struct { } // NewState creates a new network state -func NewState(clientExpiry time.Duration, maxClosedConns, maxClientStats int, maxDNSStats int, maxHTTPStats int, maxKafkaStats int) State { +func NewState(clientExpiry time.Duration, maxClosedConns uint32, maxClientStats int, maxDNSStats int, maxHTTPStats int, maxKafkaStats int) State { return &networkState{ clients: map[string]*client{}, clientExpiry: clientExpiry, @@ -435,7 +435,7 @@ func (ns *networkState) storeClosedConnections(conns []ConnectionStats) { continue } - if len(client.closedConnections) >= ns.maxClosedConns { + if uint32(len(client.closedConnections)) >= ns.maxClosedConns { stateTelemetry.closedConnDropped.Inc() continue } diff --git a/pkg/network/tracer/compile_test.go b/pkg/network/tracer/compile_test.go index 63e9ae100d072..2679e1e38e04c 100644 --- a/pkg/network/tracer/compile_test.go +++ b/pkg/network/tracer/compile_test.go @@ -12,13 +12,16 @@ import ( "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/network/config" ) func TestConntrackCompile(t *testing.T) { - cfg := config.New() - cfg.BPFDebug = true - out, err := getRuntimeCompiledConntracker(cfg) - require.NoError(t, err) - _ = out.Close() + ebpftest.TestBuildMode(t, ebpftest.RuntimeCompiled, "", func(t *testing.T) { + cfg := config.New() + cfg.BPFDebug = true + out, err := getRuntimeCompiledConntracker(cfg) + require.NoError(t, err) + _ = out.Close() + }) } diff --git a/pkg/network/tracer/connection/fentry/tracer.go b/pkg/network/tracer/connection/fentry/tracer.go index cd82efb8f67e3..d4fa029ee9904 100644 --- a/pkg/network/tracer/connection/fentry/tracer.go +++ b/pkg/network/tracer/connection/fentry/tracer.go @@ -10,6 +10,8 @@ package fentry import ( "errors" "fmt" + "os" + "syscall" manager 
"github.com/DataDog/ebpf-manager" @@ -51,6 +53,24 @@ func LoadTracer(config *config.Config, m *manager.Manager, mgrOpts manager.Optio telemetryMapKeys := errtelemetry.BuildTelemetryKeys(m) o.ConstantEditors = append(o.ConstantEditors, telemetryMapKeys...) + file, err := os.Stat("/proc/self/ns/pid") + + if err != nil { + return fmt.Errorf("could not load sysprobe pid: %w", err) + } + + device := file.Sys().(*syscall.Stat_t).Dev + inode := file.Sys().(*syscall.Stat_t).Ino + + o.ConstantEditors = append(o.ConstantEditors, manager.ConstantEditor{ + Name: "systemprobe_device", + Value: device, + }) + o.ConstantEditors = append(o.ConstantEditors, manager.ConstantEditor{ + Name: "systemprobe_ino", + Value: inode, + }) + // exclude all non-enabled probes to ensure we don't run into problems with unsupported probe types for _, p := range m.Probes { if _, enabled := enabledProbes[p.EBPFFuncName]; !enabled { diff --git a/pkg/network/tracer/connection/kprobe/compile_test.go b/pkg/network/tracer/connection/kprobe/compile_test.go index e6fe2a903a410..205e40546d301 100644 --- a/pkg/network/tracer/connection/kprobe/compile_test.go +++ b/pkg/network/tracer/connection/kprobe/compile_test.go @@ -12,13 +12,16 @@ import ( "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/network/config" ) func TestTracerCompile(t *testing.T) { - cfg := config.New() - cfg.BPFDebug = true - out, err := getRuntimeCompiledTracer(cfg) - require.NoError(t, err) - _ = out.Close() + ebpftest.TestBuildMode(t, ebpftest.RuntimeCompiled, "", func(t *testing.T) { + cfg := config.New() + cfg.BPFDebug = true + out, err := getRuntimeCompiledTracer(cfg) + require.NoError(t, err) + _ = out.Close() + }) } diff --git a/pkg/network/tracer/connection/kprobe/config.go b/pkg/network/tracer/connection/kprobe/config.go index 2210047b85055..42e430f1f807b 100644 --- a/pkg/network/tracer/connection/kprobe/config.go +++ 
b/pkg/network/tracer/connection/kprobe/config.go @@ -27,6 +27,7 @@ func enabledProbes(c *config.Config, runtimeTracer, coreTracer bool) (map[probes kv410 := kernel.VersionCode(4, 1, 0) kv470 := kernel.VersionCode(4, 7, 0) + kv5180 := kernel.VersionCode(5, 18, 0) kv5190 := kernel.VersionCode(5, 19, 0) kv, err := kernel.HostVersion() if err != nil { @@ -95,7 +96,17 @@ func enabledProbes(c *config.Config, runtimeTracer, coreTracer bool) (map[probes if c.CollectUDPv6Conns { enableProbe(enabled, probes.UDPv6DestroySock) enableProbe(enabled, probes.UDPv6DestroySockReturn) - enableProbe(enabled, selectVersionBasedProbe(runtimeTracer, kv, probes.IP6MakeSkb, probes.IP6MakeSkbPre470, kv470)) + if kv >= kv5180 || runtimeTracer { + // prebuilt shouldn't arrive here with 5.18+ and UDPv6 enabled + if !coreTracer && !runtimeTracer { + return nil, fmt.Errorf("UDPv6 does not function on prebuilt tracer with kernel versions 5.18+") + } + enableProbe(enabled, probes.IP6MakeSkb) + } else if kv >= kv470 { + enableProbe(enabled, probes.IP6MakeSkbPre5180) + } else { + enableProbe(enabled, probes.IP6MakeSkbPre470) + } enableProbe(enabled, probes.IP6MakeSkbReturn) enableProbe(enabled, probes.Inet6Bind) enableProbe(enabled, probes.Inet6BindRet) diff --git a/pkg/network/tracer/connection/kprobe/manager.go b/pkg/network/tracer/connection/kprobe/manager.go index c9074e76e3678..cc6abcad5bb6c 100644 --- a/pkg/network/tracer/connection/kprobe/manager.go +++ b/pkg/network/tracer/connection/kprobe/manager.go @@ -133,6 +133,7 @@ func initManager(mgr *manager.Manager, config *config.Config, closedHandler *ebp mgr.Probes = append(mgr.Probes, &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.TCPRetransmitPre470, UID: probeUID}}, &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.IP6MakeSkbPre470, UID: probeUID}}, + &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: 
probes.IP6MakeSkbPre5180, UID: probeUID}}, &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.UDPRecvMsgPre5190, UID: probeUID}}, &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.UDPv6RecvMsgPre5190, UID: probeUID}}, &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.UDPRecvMsgPre470, UID: probeUID}}, diff --git a/pkg/network/tracer/connection/tcp_close_consumer.go b/pkg/network/tracer/connection/tcp_close_consumer.go index 1cdb90107ec1d..bfb65138d485c 100644 --- a/pkg/network/tracer/connection/tcp_close_consumer.go +++ b/pkg/network/tracer/connection/tcp_close_consumer.go @@ -10,6 +10,7 @@ package connection import ( "sync" "time" + "unsafe" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/network" @@ -26,8 +27,8 @@ var closerConsumerTelemetry = struct { perfReceived telemetry.Counter perfLost telemetry.Counter }{ - telemetry.NewCounter(closeConsumerModuleName, "closed_conn_polling_received", []string{}, "Counter measuring the number of closed connection batches received"), - telemetry.NewCounter(closeConsumerModuleName, "closed_conn_polling_lost", []string{}, "Counter measuring the number of batches lost (were transmitted from ebpf but never received)"), + telemetry.NewCounter(closeConsumerModuleName, "closed_conn_polling_received", []string{}, "Counter measuring the number of closed connections received"), + telemetry.NewCounter(closeConsumerModuleName, "closed_conn_polling_lost", []string{}, "Counter measuring the number of connections lost (were transmitted from ebpf but never received)"), } type tcpCloseConsumer struct { @@ -68,6 +69,13 @@ func (c *tcpCloseConsumer) Stop() { }) } +func (c *tcpCloseConsumer) extractConn(data []byte) { + ct := (*netebpf.Conn)(unsafe.Pointer(&data[0])) + conn := c.buffer.Next() + populateConnStats(conn, &ct.Tup, &ct.Conn_stats) + updateTCPStats(conn, 
ct.Conn_stats.Cookie, &ct.Tcp_stats) +} + func (c *tcpCloseConsumer) Start(callback func([]network.ConnectionStats)) { if c == nil { return @@ -75,8 +83,8 @@ func (c *tcpCloseConsumer) Start(callback func([]network.ConnectionStats)) { var ( then time.Time = time.Now() - closedCount int - lostCount int + closedCount uint64 + lostCount uint64 ) go func() { for { @@ -86,10 +94,20 @@ func (c *tcpCloseConsumer) Start(callback func([]network.ConnectionStats)) { return } - closerConsumerTelemetry.perfReceived.Inc() - batch := netebpf.ToBatch(batchData.Data) - c.batchManager.ExtractBatchInto(c.buffer, batch, batchData.CPU) - closedCount += c.buffer.Len() + l := len(batchData.Data) + switch { + case l >= netebpf.SizeofBatch: + batch := netebpf.ToBatch(batchData.Data) + c.batchManager.ExtractBatchInto(c.buffer, batch, batchData.CPU) + case l >= netebpf.SizeofConn: + c.extractConn(batchData.Data) + default: + log.Errorf("unknown type received from perf buffer, skipping. data size=%d, expecting %d or %d", len(batchData.Data), netebpf.SizeofConn, netebpf.SizeofBatch) + continue + } + + closerConsumerTelemetry.perfReceived.Add(float64(c.buffer.Len())) + closedCount += uint64(c.buffer.Len()) callback(c.buffer.Connections()) c.buffer.Reset() batchData.Done() @@ -97,8 +115,8 @@ func (c *tcpCloseConsumer) Start(callback func([]network.ConnectionStats)) { if !ok { return } - closerConsumerTelemetry.perfLost.Add(float64(lc)) - lostCount += netebpf.BatchSize + closerConsumerTelemetry.perfLost.Add(float64(lc * netebpf.BatchSize)) + lostCount += lc * netebpf.BatchSize case request, ok := <-c.requests: if !ok { return @@ -109,7 +127,7 @@ func (c *tcpCloseConsumer) Start(callback func([]network.ConnectionStats)) { callback(oneTimeBuffer.Connections()) close(request) - closedCount += oneTimeBuffer.Len() + closedCount += uint64(oneTimeBuffer.Len()) now := time.Now() elapsed := now.Sub(then) then = now diff --git a/pkg/network/tracer/connection/tracer.go 
b/pkg/network/tracer/connection/tracer.go index d2d4ec13704e4..1ba235f281239 100644 --- a/pkg/network/tracer/connection/tracer.go +++ b/pkg/network/tracer/connection/tracer.go @@ -83,8 +83,8 @@ var ConnTracerTelemetry = struct { connections telemetry.Gauge tcpFailedConnects *nettelemetry.StatCounterWrapper TcpSentMiscounts *nettelemetry.StatCounterWrapper - missedTcpClose *nettelemetry.StatCounterWrapper - missedUdpClose *nettelemetry.StatCounterWrapper + unbatchedTcpClose *nettelemetry.StatCounterWrapper + unbatchedUdpClose *nettelemetry.StatCounterWrapper UdpSendsProcessed *nettelemetry.StatCounterWrapper UdpSendsMissed *nettelemetry.StatCounterWrapper UdpDroppedConns *nettelemetry.StatCounterWrapper @@ -93,8 +93,8 @@ var ConnTracerTelemetry = struct { telemetry.NewGauge(connTracerModuleName, "connections", []string{"ip_proto", "family"}, "Gauge measuring the number of active connections in the EBPF map"), nettelemetry.NewStatCounterWrapper(connTracerModuleName, "tcp_failed_connects", []string{}, "Counter measuring the number of failed TCP connections in the EBPF map"), nettelemetry.NewStatCounterWrapper(connTracerModuleName, "tcp_sent_miscounts", []string{}, "Counter measuring the number of miscounted tcp sends in the EBPF map"), - nettelemetry.NewStatCounterWrapper(connTracerModuleName, "missed_tcp_close", []string{}, "Counter measuring the number of missed TCP close events in the EBPF map"), - nettelemetry.NewStatCounterWrapper(connTracerModuleName, "missed_udp_close", []string{}, "Counter measuring the number of missed UDP close events in the EBPF map"), + nettelemetry.NewStatCounterWrapper(connTracerModuleName, "unbatched_tcp_close", []string{}, "Counter measuring the number of missed TCP close events in the EBPF map"), + nettelemetry.NewStatCounterWrapper(connTracerModuleName, "unbatched_udp_close", []string{}, "Counter measuring the number of missed UDP close events in the EBPF map"), nettelemetry.NewStatCounterWrapper(connTracerModuleName, 
"udp_sends_processed", []string{}, "Counter measuring the number of processed UDP sends in EBPF"), nettelemetry.NewStatCounterWrapper(connTracerModuleName, "udp_sends_missed", []string{}, "Counter measuring failures to process UDP sends in EBPF"), nettelemetry.NewStatCounterWrapper(connTracerModuleName, "udp_dropped_conns", []string{}, "Counter measuring the number of dropped UDP connections in the EBPF map"), @@ -135,14 +135,14 @@ func NewTracer(config *config.Config, bpfTelemetry *errtelemetry.EBPFTelemetry) Max: math.MaxUint64, }, MapSpecEditors: map[string]manager.MapSpecEditor{ - probes.ConnMap: {Type: ebpf.Hash, MaxEntries: uint32(config.MaxTrackedConnections), EditorFlag: manager.EditMaxEntries}, - probes.TCPStatsMap: {Type: ebpf.Hash, MaxEntries: uint32(config.MaxTrackedConnections), EditorFlag: manager.EditMaxEntries}, - probes.PortBindingsMap: {Type: ebpf.Hash, MaxEntries: uint32(config.MaxTrackedConnections), EditorFlag: manager.EditMaxEntries}, - probes.UDPPortBindingsMap: {Type: ebpf.Hash, MaxEntries: uint32(config.MaxTrackedConnections), EditorFlag: manager.EditMaxEntries}, - probes.SockByPidFDMap: {Type: ebpf.Hash, MaxEntries: uint32(config.MaxTrackedConnections), EditorFlag: manager.EditMaxEntries}, - probes.PidFDBySockMap: {Type: ebpf.Hash, MaxEntries: uint32(config.MaxTrackedConnections), EditorFlag: manager.EditMaxEntries}, - probes.ConnectionProtocolMap: {Type: ebpf.Hash, MaxEntries: uint32(config.MaxTrackedConnections), EditorFlag: manager.EditMaxEntries}, - probes.ConnectionTupleToSocketSKBConnMap: {Type: ebpf.Hash, MaxEntries: uint32(config.MaxTrackedConnections), EditorFlag: manager.EditMaxEntries}, + probes.ConnMap: {Type: ebpf.Hash, MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, + probes.TCPStatsMap: {Type: ebpf.Hash, MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, + probes.PortBindingsMap: {Type: ebpf.Hash, MaxEntries: config.MaxTrackedConnections, EditorFlag: 
manager.EditMaxEntries}, + probes.UDPPortBindingsMap: {Type: ebpf.Hash, MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, + probes.SockByPidFDMap: {Type: ebpf.Hash, MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, + probes.PidFDBySockMap: {Type: ebpf.Hash, MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, + probes.ConnectionProtocolMap: {Type: ebpf.Hash, MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, + probes.ConnectionTupleToSocketSKBConnMap: {Type: ebpf.Hash, MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, }, ConstantEditors: []manager.ConstantEditor{ boolConst("tcpv6_enabled", config.CollectTCPv6Conns), @@ -450,8 +450,8 @@ func (t *tracer) refreshProbeTelemetry() { ConnTracerTelemetry.tcpFailedConnects.Add(int64(ebpfTelemetry.Tcp_failed_connect) - ConnTracerTelemetry.tcpFailedConnects.Load()) ConnTracerTelemetry.TcpSentMiscounts.Add(int64(ebpfTelemetry.Tcp_sent_miscounts) - ConnTracerTelemetry.TcpSentMiscounts.Load()) - ConnTracerTelemetry.missedTcpClose.Add(int64(ebpfTelemetry.Missed_tcp_close) - ConnTracerTelemetry.missedTcpClose.Load()) - ConnTracerTelemetry.missedUdpClose.Add(int64(ebpfTelemetry.Missed_udp_close) - ConnTracerTelemetry.missedUdpClose.Load()) + ConnTracerTelemetry.unbatchedTcpClose.Add(int64(ebpfTelemetry.Unbatched_tcp_close) - ConnTracerTelemetry.unbatchedTcpClose.Load()) + ConnTracerTelemetry.unbatchedUdpClose.Add(int64(ebpfTelemetry.Unbatched_udp_close) - ConnTracerTelemetry.unbatchedUdpClose.Load()) ConnTracerTelemetry.UdpSendsProcessed.Add(int64(ebpfTelemetry.Udp_sends_processed) - ConnTracerTelemetry.UdpSendsProcessed.Load()) ConnTracerTelemetry.UdpSendsMissed.Add(int64(ebpfTelemetry.Udp_sends_missed) - ConnTracerTelemetry.UdpSendsMissed.Load()) ConnTracerTelemetry.UdpDroppedConns.Add(int64(ebpfTelemetry.Udp_dropped_conns) - ConnTracerTelemetry.UdpDroppedConns.Load()) @@ -559,7 +559,6 @@ 
func populateConnStats(stats *network.ConnectionStats, t *netebpf.ConnTuple, s * Application: protocols.Application(s.Protocol_stack.Application), Encryption: protocols.Encryption(s.Protocol_stack.Encryption), } - stats.StaticTags |= uint64(s.Conn_tags) if t.Type() == netebpf.TCP { stats.Type = network.TCP diff --git a/pkg/network/tracer/conntracker_test.go b/pkg/network/tracer/conntracker_test.go index 4aef9f8cd14e0..fc8be5085af77 100644 --- a/pkg/network/tracer/conntracker_test.go +++ b/pkg/network/tracer/conntracker_test.go @@ -18,6 +18,7 @@ import ( "github.com/vishvananda/netns" "go4.org/netipx" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/netlink" @@ -33,79 +34,65 @@ const ( ) func TestConntrackers(t *testing.T) { - conntrackers := []struct { - name string - create func(*testing.T, *config.Config) (netlink.Conntracker, error) - }{ - {"netlink", setupNetlinkConntracker}, - {"eBPF-prebuilt", setupPrebuiltEBPFConntracker}, - {"eBPF-runtime", setupRuntimeEBPFConntracker}, - } - for _, conntracker := range conntrackers { - t.Run(conntracker.name, func(t *testing.T) { - t.Run("IPv4", func(t *testing.T) { - cfg := config.New() - ct, err := conntracker.create(t, cfg) - require.NoError(t, err) - defer ct.Close() - - netlinktestutil.SetupDNAT(t) - - testConntracker(t, net.ParseIP("1.1.1.1"), net.ParseIP("2.2.2.2"), ct, cfg) - }) - t.Run("IPv6", func(t *testing.T) { - cfg := config.New() - ct, err := conntracker.create(t, cfg) - require.NoError(t, err) - defer ct.Close() - - netlinktestutil.SetupDNAT6(t) - - testConntracker(t, net.ParseIP("fd00::1"), net.ParseIP("fd00::2"), ct, cfg) - }) - t.Run("cross namespace - NAT rule on test namespace", func(t *testing.T) { - if conntracker.name == "netlink" { - kv, err := kernel.HostVersion() - require.NoError(t, err) - if kv >= kernel.VersionCode(5, 19, 0) && kv < 
kernel.VersionCode(6, 3, 0) { - // see https://lore.kernel.org/netfilter-devel/CALvGib_xHOVD2+6tKm2Sf0wVkQwut2_z2gksZPcGw30tOvOAAA@mail.gmail.com/T/#u - t.Skip("skip due to a kernel bug with conntrack netlink events flowing across namespaces") - } - } - - cfg := config.New() - cfg.EnableConntrackAllNamespaces = true - ct, err := conntracker.create(t, cfg) - require.NoError(t, err) - defer ct.Close() - - testConntrackerCrossNamespace(t, ct) - }) - t.Run("cross namespace - NAT rule on root namespace", func(t *testing.T) { - cfg := config.New() - cfg.EnableConntrackAllNamespaces = true - ct, err := conntracker.create(t, cfg) - require.NoError(t, err) - defer ct.Close() - - testConntrackerCrossNamespaceNATonRoot(t, ct) - }) + t.Run("netlink", func(t *testing.T) { + runConntrackerTest(t, "netlink", setupNetlinkConntracker) + }) + t.Run("eBPF", func(t *testing.T) { + ebpftest.TestBuildModes(t, []ebpftest.BuildMode{ebpftest.Prebuilt, ebpftest.RuntimeCompiled}, "", func(t *testing.T) { + runConntrackerTest(t, "eBPF", setupEBPFConntracker) }) - } + }) } -func setupPrebuiltEBPFConntracker(t *testing.T, cfg *config.Config) (netlink.Conntracker, error) { - // prebuilt on 5.18+ does not support UDPv6 - if kv >= kernel.VersionCode(5, 18, 0) { - cfg.CollectUDPv6Conns = false - } - cfg.EnableRuntimeCompiler = false - return NewEBPFConntracker(cfg, nil) +func runConntrackerTest(t *testing.T, name string, createFn func(*testing.T, *config.Config) (netlink.Conntracker, error)) { + t.Run("IPv4", func(t *testing.T) { + cfg := config.New() + ct, err := createFn(t, cfg) + require.NoError(t, err) + defer ct.Close() + + netlinktestutil.SetupDNAT(t) + + testConntracker(t, net.ParseIP("1.1.1.1"), net.ParseIP("2.2.2.2"), ct, cfg) + }) + t.Run("IPv6", func(t *testing.T) { + cfg := config.New() + ct, err := createFn(t, cfg) + require.NoError(t, err) + defer ct.Close() + + netlinktestutil.SetupDNAT6(t) + + testConntracker(t, net.ParseIP("fd00::1"), net.ParseIP("fd00::2"), ct, cfg) + }) + 
t.Run("cross namespace - NAT rule on test namespace", func(t *testing.T) { + if name == "netlink" { + if kv >= kernel.VersionCode(5, 19, 0) && kv < kernel.VersionCode(6, 3, 0) { + // see https://lore.kernel.org/netfilter-devel/CALvGib_xHOVD2+6tKm2Sf0wVkQwut2_z2gksZPcGw30tOvOAAA@mail.gmail.com/T/#u + t.Skip("skip due to a kernel bug with conntrack netlink events flowing across namespaces") + } + } + + cfg := config.New() + cfg.EnableConntrackAllNamespaces = true + ct, err := createFn(t, cfg) + require.NoError(t, err) + defer ct.Close() + + testConntrackerCrossNamespace(t, ct) + }) + t.Run("cross namespace - NAT rule on root namespace", func(t *testing.T) { + cfg := config.New() + cfg.EnableConntrackAllNamespaces = true + ct, err := createFn(t, cfg) + require.NoError(t, err) + defer ct.Close() + + testConntrackerCrossNamespaceNATonRoot(t, ct) + }) } -func setupRuntimeEBPFConntracker(t *testing.T, cfg *config.Config) (netlink.Conntracker, error) { - cfg.EnableRuntimeCompiler = true - cfg.AllowPrecompiledFallback = false +func setupEBPFConntracker(t *testing.T, cfg *config.Config) (netlink.Conntracker, error) { return NewEBPFConntracker(cfg, nil) } diff --git a/pkg/network/tracer/ebpf_conntracker.go b/pkg/network/tracer/ebpf_conntracker.go index 71e877490ae35..f841afe959840 100644 --- a/pkg/network/tracer/ebpf_conntracker.go +++ b/pkg/network/tracer/ebpf_conntracker.go @@ -424,6 +424,7 @@ func getManager(cfg *config.Config, buf io.ReaderAt, mapErrTelemetryMap, helperE EBPFFuncName: probes.ConntrackFillInfo, UID: "conntracker", }, + MatchFuncName: "^ctnetlink_fill_info(\\.constprop\\.0)?$", }, }, } diff --git a/pkg/network/tracer/gateway_lookup.go b/pkg/network/tracer/gateway_lookup.go index 57008b8e24ad8..cc9230a71f0fb 100644 --- a/pkg/network/tracer/gateway_lookup.go +++ b/pkg/network/tracer/gateway_lookup.go @@ -9,6 +9,7 @@ package tracer import ( "context" + "math" "net" "time" @@ -25,7 +26,7 @@ import ( ) const ( - maxRouteCacheSize = int(^uint(0) >> 1) // max int 
+ maxRouteCacheSize = uint32(math.MaxUint32) maxSubnetCacheSize = 1024 gatewayLookupModuleName = "network_tracer__gateway_lookup" ) @@ -92,14 +93,14 @@ func newGatewayLookup(config *config.Config) *gatewayLookup { } routeCacheSize := maxRouteCacheSize - if config.MaxTrackedConnections <= uint(maxRouteCacheSize) { - routeCacheSize = int(config.MaxTrackedConnections) + if config.MaxTrackedConnections <= maxRouteCacheSize { + routeCacheSize = config.MaxTrackedConnections } else { log.Warnf("using truncated route cache size of %d instead of %d", routeCacheSize, config.MaxTrackedConnections) } gl.subnetCache, _ = simplelru.NewLRU(maxSubnetCacheSize, nil) - gl.routeCache = network.NewRouteCache(routeCacheSize, router) + gl.routeCache = network.NewRouteCache(int(routeCacheSize), router) return gl } diff --git a/pkg/network/tracer/offsetguess_test.go b/pkg/network/tracer/offsetguess_test.go index 7d69f04787cc9..621db8ea35360 100644 --- a/pkg/network/tracer/offsetguess_test.go +++ b/pkg/network/tracer/offsetguess_test.go @@ -22,6 +22,7 @@ import ( manager "github.com/DataDog/ebpf-manager" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode/runtime" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" netebpf "github.com/DataDog/datadog-agent/pkg/network/ebpf" "github.com/DataDog/datadog-agent/pkg/network/tracer/offsetguess" "github.com/DataDog/datadog-agent/pkg/process/statsd" @@ -126,13 +127,14 @@ func (o offsetT) String() string { } func TestOffsetGuess(t *testing.T) { + ebpftest.TestBuildMode(t, ebpftest.RuntimeCompiled, "", testOffsetGuess) +} + +func testOffsetGuess(t *testing.T) { cfg := testConfig() // offset guessing used to rely on this previously, // but doesn't anymore cfg.ProtocolClassificationEnabled = false - if !cfg.EnableRuntimeCompiler { - t.Skip("runtime compilation is not enabled") - } offsetBuf, err := netebpf.ReadOffsetBPFModule(cfg.BPFDir, cfg.BPFDebug) require.NoError(t, err, "could not read offset bpf module") diff --git 
a/pkg/network/tracer/tracer.go b/pkg/network/tracer/tracer.go index 33e75e0fc8f2a..b19f18c96144c 100644 --- a/pkg/network/tracer/tracer.go +++ b/pkg/network/tracer/tracer.go @@ -57,15 +57,17 @@ const tracerModuleName = "network_tracer" // If we want to have a way to track the # of active TCP connections in the future we could use the procfs like here: https://github.com/DataDog/datadog-agent/pull/3728 // to determine whether a connection is truly closed or not var tracerTelemetry = struct { - skippedConns telemetry.Counter - expiredTCPConns telemetry.Counter - closedConns *nettelemetry.StatCounterWrapper - connStatsMapSize telemetry.Gauge + skippedConns telemetry.Counter + expiredTCPConns telemetry.Counter + closedConns *nettelemetry.StatCounterWrapper + connStatsMapSize telemetry.Gauge + payloadSizePerClient telemetry.Gauge }{ telemetry.NewCounter(tracerModuleName, "skipped_conns", []string{}, "Counter measuring skipped TCP connections"), telemetry.NewCounter(tracerModuleName, "expired_tcp_conns", []string{}, "Counter measuring expired TCP connections"), nettelemetry.NewStatCounterWrapper(tracerModuleName, "closed_conns", []string{}, "Counter measuring closed TCP connections"), telemetry.NewGauge(tracerModuleName, "conn_stats_map_size", []string{}, "Gauge measuring the size of the active connections map"), + telemetry.NewGauge(tracerModuleName, "payload_conn_count", []string{"client_id"}, "Gauge measuring the number of connections in the system-probe payload"), } // Tracer implements the functionality of the network tracer @@ -381,6 +383,8 @@ func (t *Tracer) GetActiveConnections(clientID string) (*network.Connections, er delta := t.state.GetDelta(clientID, latestTime, active, t.reverseDNS.GetDNSStats(), t.usmMonitor.GetHTTPStats(), t.usmMonitor.GetHTTP2Stats(), t.usmMonitor.GetKafkaStats()) t.activeBuffer.Reset() + tracerTelemetry.payloadSizePerClient.Set(float64(len(delta.Conns)), clientID) + ips := make([]util.Address, 0, len(delta.Conns)*2) for _, conn := 
range delta.Conns { ips = append(ips, conn.Source, conn.Dest) @@ -421,8 +425,8 @@ func (t *Tracer) removeClient(clientID string) { func (t *Tracer) getConnTelemetry(mapSize int) map[network.ConnTelemetryType]int64 { kprobeStats := ddebpf.GetProbeTotals() tm := map[network.ConnTelemetryType]int64{ - network.MonotonicKprobesTriggered: kprobeStats.Hits, - network.MonotonicKprobesMissed: kprobeStats.Misses, + network.MonotonicKprobesTriggered: int64(kprobeStats.Hits), + network.MonotonicKprobesMissed: int64(kprobeStats.Misses), network.ConnsBpfMapSize: int64(mapSize), network.MonotonicConnsClosed: tracerTelemetry.closedConns.Load(), } diff --git a/pkg/network/tracer/tracer_classification_linux_test.go b/pkg/network/tracer/tracer_classification_linux_test.go index aefa651e4c24e..63920348bbd87 100644 --- a/pkg/network/tracer/tracer_classification_linux_test.go +++ b/pkg/network/tracer/tracer_classification_linux_test.go @@ -64,7 +64,9 @@ func testProtocolClassificationInner(t *testing.T, params protocolClassification tr.removeClient(clientID) initTracerState(t, tr) require.NoError(t, tr.ebpfTracer.Resume(), "enable probes - before post tracer") - params.postTracerSetup(t, params.context) + if params.postTracerSetup != nil { + params.postTracerSetup(t, params.context) + } require.NoError(t, tr.ebpfTracer.Pause(), "disable probes - after post tracer") params.validation(t, params.context, tr) diff --git a/pkg/network/tracer/tracer_classification_test.go b/pkg/network/tracer/tracer_classification_test.go index 1dc6e9894cf92..b1495183dfe6b 100644 --- a/pkg/network/tracer/tracer_classification_test.go +++ b/pkg/network/tracer/tracer_classification_test.go @@ -1630,14 +1630,18 @@ func testEdgeCasesProtocolClassification(t *testing.T, tr *Tracer, clientHost, t } func waitForConnectionsWithProtocol(t *testing.T, tr *Tracer, targetAddr, serverAddr string, expectedProtocol protocols.ProtocolType, expectedTLS bool) { + t.Logf("looking for target addr %s", targetAddr) + 
t.Logf("looking for server addr %s", serverAddr) var outgoing, incoming *network.ConnectionStats - assert.Eventually(t, func() bool { + failed := !assert.Eventually(t, func() bool { conns := getConnections(t, tr) if outgoing == nil { for _, c := range searchConnections(conns, func(cs network.ConnectionStats) bool { return cs.Direction == network.OUTGOING && cs.Type == network.TCP && fmt.Sprintf("%s:%d", cs.Dest, cs.DPort) == targetAddr }) { + t.Logf("found potential outgoing connection %+v", c) if c.ProtocolStack.Contains(expectedProtocol) && (c.ProtocolStack.Contains(protocols.TLS) == expectedTLS) { + t.Logf("found outgoing connection %+v", c) outgoing = &c break } @@ -1648,17 +1652,22 @@ func waitForConnectionsWithProtocol(t *testing.T, tr *Tracer, targetAddr, server for _, c := range searchConnections(conns, func(cs network.ConnectionStats) bool { return cs.Direction == network.INCOMING && cs.Type == network.TCP && fmt.Sprintf("%s:%d", cs.Source, cs.SPort) == serverAddr }) { + t.Logf("found potential incoming connection %+v", c) if c.ProtocolStack.Contains(expectedProtocol) && (c.ProtocolStack.Contains(protocols.TLS) == expectedTLS) { + t.Logf("found incoming connection %+v", c) incoming = &c break } } } - failed := !(incoming != nil && outgoing != nil) + failed := incoming == nil || outgoing == nil if failed { t.Log(conns) } return !failed - }, 5*time.Second, 500*time.Millisecond, "could not find incoming or outgoing connections, incoming=%+v outgoing=%+v", incoming, outgoing) + }, 5*time.Second, 500*time.Millisecond, "could not find incoming or outgoing connections") + if failed { + t.Logf("incoming=%+v outgoing=%+v", incoming, outgoing) + } } diff --git a/pkg/network/tracer/tracer_linux_test.go b/pkg/network/tracer/tracer_linux_test.go index 3dd00e2cf5456..586fd3bee7565 100644 --- a/pkg/network/tracer/tracer_linux_test.go +++ b/pkg/network/tracer/tracer_linux_test.go @@ -50,13 +50,8 @@ import ( manager "github.com/DataDog/ebpf-manager" ) -var kv470 
kernel.Version = kernel.VersionCode(4, 7, 0) -var kv kernel.Version - -func setKernelVersion() (err error) { - kv, err = kernel.HostVersion() - return -} +var kv470 = kernel.VersionCode(4, 7, 0) +var kv = kernel.MustHostVersion() func doDNSQuery(t *testing.T, domain string, serverIP string) (*net.UDPAddr, *net.UDPAddr) { dnsServerAddr := &net.UDPAddr{IP: net.ParseIP(serverIP), Port: 53} @@ -74,7 +69,8 @@ func doDNSQuery(t *testing.T, domain string, serverIP string) (*net.UDPAddr, *ne return dnsClientAddr, dnsServerAddr } -func TestTCPRemoveEntries(t *testing.T) { +func (s *TracerSuite) TestTCPRemoveEntries() { + t := s.T() config := testConfig() config.TCPConnTimeout = 100 * time.Millisecond tr := setupTracer(t, config) @@ -129,7 +125,8 @@ func TestTCPRemoveEntries(t *testing.T) { } -func TestTCPRetransmit(t *testing.T) { +func (s *TracerSuite) TestTCPRetransmit() { + t := s.T() // Enable BPF-based system probe tr := setupTracer(t, testConfig()) @@ -177,7 +174,8 @@ func TestTCPRetransmit(t *testing.T) { assert.Equal(t, addrPort(server.address), int(conn.DPort)) } -func TestTCPRetransmitSharedSocket(t *testing.T) { +func (s *TracerSuite) TestTCPRetransmitSharedSocket() { + t := s.T() // Create TCP Server that simply "drains" connection until receiving an EOF server := NewTCPServer(func(c net.Conn) { io.Copy(io.Discard, c) @@ -249,7 +247,8 @@ func TestTCPRetransmitSharedSocket(t *testing.T) { assert.GreaterOrEqual(t, connection.ConnTracerTelemetry.PidCollisions.Load(), int64(numProcesses-1)) } -func TestTCPRTT(t *testing.T) { +func (s *TracerSuite) TestTCPRTT() { + t := s.T() // Enable BPF-based system probe tr := setupTracer(t, testConfig()) // Create TCP Server that simply "drains" connection until receiving an EOF @@ -286,7 +285,8 @@ func TestTCPRTT(t *testing.T) { assert.EqualValues(t, int(tcpInfo.Rttvar), int(conn.RTTVar)) } -func TestTCPMiscount(t *testing.T) { +func (s *TracerSuite) TestTCPMiscount() { + t := s.T() t.Skip("skipping because this test will 
pass/fail depending on host performance") tr := setupTracer(t, testConfig()) // Create a dummy TCP Server @@ -339,7 +339,8 @@ func TestTCPMiscount(t *testing.T) { assert.NotZero(t, connection.ConnTracerTelemetry.TcpSentMiscounts.Load()) } -func TestConnectionExpirationRegression(t *testing.T) { +func (s *TracerSuite) TestConnectionExpirationRegression() { + t := s.T() t.SkipNow() tr := setupTracer(t, testConfig()) // Create TCP Server that simply "drains" connection until receiving an EOF @@ -386,7 +387,8 @@ func TestConnectionExpirationRegression(t *testing.T) { require.False(t, ok) } -func TestConntrackExpiration(t *testing.T) { +func (s *TracerSuite) TestConntrackExpiration() { + t := s.T() netlinktestutil.SetupDNAT(t) wg := sync.WaitGroup{} @@ -445,7 +447,8 @@ func TestConntrackExpiration(t *testing.T) { // This test ensures that conntrack lookups are retried for short-lived // connections when the first lookup fails -func TestConntrackDelays(t *testing.T) { +func (s *TracerSuite) TestConntrackDelays() { + t := s.T() netlinktestutil.SetupDNAT(t) wg := sync.WaitGroup{} @@ -488,7 +491,8 @@ func TestConntrackDelays(t *testing.T) { wg.Wait() } -func TestTranslationBindingRegression(t *testing.T) { +func (s *TracerSuite) TestTranslationBindingRegression() { + t := s.T() netlinktestutil.SetupDNAT(t) wg := sync.WaitGroup{} @@ -530,7 +534,8 @@ func TestTranslationBindingRegression(t *testing.T) { wg.Wait() } -func TestUnconnectedUDPSendIPv6(t *testing.T) { +func (s *TracerSuite) TestUnconnectedUDPSendIPv6() { + t := s.T() cfg := testConfig() if !cfg.CollectUDPv6Conns { t.Skip("UDPv6 disabled") @@ -559,7 +564,8 @@ func TestUnconnectedUDPSendIPv6(t *testing.T) { assert.Equal(t, bytesSent, int(outgoing[0].Monotonic.SentBytes)) } -func TestGatewayLookupNotEnabled(t *testing.T) { +func (s *TracerSuite) TestGatewayLookupNotEnabled() { + t := s.T() t.Run("gateway lookup enabled, not on aws", func(t *testing.T) { cfg := testConfig() cfg.EnableGatewayLookup = true @@ -596,7 
+602,8 @@ func TestGatewayLookupNotEnabled(t *testing.T) { }) } -func TestGatewayLookupEnabled(t *testing.T) { +func (s *TracerSuite) TestGatewayLookupEnabled() { + t := s.T() ctrl := gomock.NewController(t) m := NewMockcloudProvider(ctrl) oldCloud := cloud @@ -647,7 +654,8 @@ func TestGatewayLookupEnabled(t *testing.T) { require.Equal(t, conn.Via.Subnet.Alias, fmt.Sprintf("subnet-%d", ifi.Index)) } -func TestGatewayLookupSubnetLookupError(t *testing.T) { +func (s *TracerSuite) TestGatewayLookupSubnetLookupError() { + t := s.T() ctrl := gomock.NewController(t) m := NewMockcloudProvider(ctrl) oldCloud := cloud @@ -702,7 +710,8 @@ func TestGatewayLookupSubnetLookupError(t *testing.T) { require.Equal(t, 1, calls, "calls to subnetForHwAddrFunc are != 1 for hw addr %s", ifi.HardwareAddr) } -func TestGatewayLookupCrossNamespace(t *testing.T) { +func (s *TracerSuite) TestGatewayLookupCrossNamespace() { + t := s.T() ctrl := gomock.NewController(t) m := NewMockcloudProvider(ctrl) oldCloud := cloud @@ -855,7 +864,8 @@ func TestGatewayLookupCrossNamespace(t *testing.T) { }) } -func TestConnectionAssured(t *testing.T) { +func (s *TracerSuite) TestConnectionAssured() { + t := s.T() cfg := testConfig() tr := setupTracer(t, cfg) server := &UDPServer{ @@ -895,7 +905,8 @@ func TestConnectionAssured(t *testing.T) { require.True(t, conn.IsAssured) } -func TestConnectionNotAssured(t *testing.T) { +func (s *TracerSuite) TestConnectionNotAssured() { + t := s.T() cfg := testConfig() tr := setupTracer(t, cfg) @@ -929,7 +940,8 @@ func TestConnectionNotAssured(t *testing.T) { require.False(t, conn.IsAssured) } -func TestUDPConnExpiryTimeout(t *testing.T) { +func (s *TracerSuite) TestUDPConnExpiryTimeout() { + t := s.T() streamTimeout, err := sysctl.NewInt("/proc", "net/netfilter/nf_conntrack_udp_timeout_stream", 0).Get() require.NoError(t, err) timeout, err := sysctl.NewInt("/proc", "net/netfilter/nf_conntrack_udp_timeout", 0).Get() @@ -940,7 +952,8 @@ func TestUDPConnExpiryTimeout(t 
*testing.T) { require.Equal(t, uint64(time.Duration(streamTimeout)*time.Second), tr.udpConnTimeout(true)) } -func TestDNATIntraHostIntegration(t *testing.T) { +func (s *TracerSuite) TestDNATIntraHostIntegration() { + t := s.T() netlinktestutil.SetupDNAT(t) tr := setupTracer(t, testConfig()) @@ -989,7 +1002,8 @@ func TestDNATIntraHostIntegration(t *testing.T) { assert.True(t, c.IntraHost, "did not find incoming connection classified as local: %v", c) } -func TestSelfConnect(t *testing.T) { +func (s *TracerSuite) TestSelfConnect() { + t := s.T() // Enable BPF-based system probe cfg := testConfig() cfg.TCPConnTimeout = 3 * time.Second @@ -1034,7 +1048,8 @@ func TestSelfConnect(t *testing.T) { }, 5*time.Second, time.Second, "could not find expected number of tcp connections, expected: 2") } -func TestUDPPeekCount(t *testing.T) { +func (s *TracerSuite) TestUDPPeekCount() { + t := s.T() config := testConfig() tr := setupTracer(t, config) @@ -1119,7 +1134,8 @@ func TestUDPPeekCount(t *testing.T) { require.True(t, incoming.IntraHost) } -func TestUDPPythonReusePort(t *testing.T) { +func (s *TracerSuite) TestUDPPythonReusePort() { + t := s.T() cfg := testConfig() if isPrebuilt(cfg) && kv < kv470 { t.Skip("reuseport not supported on prebuilt") @@ -1198,7 +1214,8 @@ func TestUDPPythonReusePort(t *testing.T) { } } -func TestUDPReusePort(t *testing.T) { +func (s *TracerSuite) TestUDPReusePort() { + t := s.T() t.Run("v4", func(t *testing.T) { testUDPReusePort(t, "udp4", "127.0.0.1") }) @@ -1290,7 +1307,8 @@ func testUDPReusePort(t *testing.T, udpnet string, ip string) { } } -func TestDNSStatsWithNAT(t *testing.T) { +func (s *TracerSuite) TestDNSStatsWithNAT() { + t := s.T() testutil.IptablesSave(t) // Setup a NAT rule to translate 2.2.2.2 to 8.8.8.8 and issue a DNS request to 2.2.2.2 cmds := []string{"iptables -t nat -A OUTPUT -d 2.2.2.2 -j DNAT --to-destination 8.8.8.8"} @@ -1346,7 +1364,8 @@ type SyscallConn interface { SyscallConn() (syscall.RawConn, error) } -func 
TestSendfileRegression(t *testing.T) { +func (s *TracerSuite) TestSendfileRegression() { + t := s.T() // Start tracer cfg := testConfig() tr := setupTracer(t, cfg) @@ -1460,7 +1479,8 @@ func isPrebuilt(cfg *config.Config) bool { return true } -func TestSendfileError(t *testing.T) { +func (s *TracerSuite) TestSendfileError() { + t := s.T() tr := setupTracer(t, testConfig()) tmpfile, err := os.CreateTemp("", "sendfile_source") @@ -1516,7 +1536,8 @@ func sendFile(t *testing.T, c SyscallConn, f *os.File, offset *int64, count int) return n, serr } -func TestShortWrite(t *testing.T) { +func (s *TracerSuite) TestShortWrite() { + t := s.T() tr := setupTracer(t, testConfig()) read := make(chan struct{}) @@ -1534,14 +1555,14 @@ func TestShortWrite(t *testing.T) { server.Shutdown() }) - s, err := unix.Socket(syscall.AF_INET, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK, 0) + sk, err := unix.Socket(syscall.AF_INET, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK, 0) require.NoError(t, err) - defer syscall.Close(s) + defer syscall.Close(sk) - err = unix.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_SNDBUF, 5000) + err = unix.SetsockoptInt(sk, syscall.SOL_SOCKET, syscall.SO_SNDBUF, 5000) require.NoError(t, err) - sndBufSize, err := unix.GetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_SNDBUF) + sndBufSize, err := unix.GetsockoptInt(sk, syscall.SOL_SOCKET, syscall.SO_SNDBUF) require.NoError(t, err) require.GreaterOrEqual(t, sndBufSize, 5000) @@ -1553,16 +1574,16 @@ func TestShortWrite(t *testing.T) { require.NoError(t, err) sa.Port = int(port) - err = unix.Connect(s, &sa) + err = unix.Connect(sk, &sa) if syscall.EINPROGRESS != err { require.NoError(t, err) } var wfd unix.FdSet wfd.Zero() - wfd.Set(s) + wfd.Set(sk) tv := unix.NsecToTimeval(int64((5 * time.Second).Nanoseconds())) - nfds, err := unix.Select(s+1, nil, &wfd, nil, &tv) + nfds, err := unix.Select(sk+1, nil, &wfd, nil, &tv) require.NoError(t, err) require.Equal(t, 1, nfds) @@ -1571,7 +1592,7 @@ func TestShortWrite(t *testing.T) 
{ var sent uint64 toSend := sndBufSize / 2 for i := 0; i < 100; i++ { - written, err = unix.Write(s, genPayload(toSend)) + written, err = unix.Write(sk, genPayload(toSend)) require.Greater(t, written, 0) require.NoError(t, err) sent += uint64(written) @@ -1584,7 +1605,7 @@ func TestShortWrite(t *testing.T) { require.True(t, done) - f := os.NewFile(uintptr(s), "") + f := os.NewFile(uintptr(sk), "") defer f.Close() c, err := net.FileConn(f) require.NoError(t, err) @@ -1600,7 +1621,8 @@ func TestShortWrite(t *testing.T) { assert.Equal(t, sent, conn.Monotonic.SentBytes) } -func TestKprobeAttachWithKprobeEvents(t *testing.T) { +func (s *TracerSuite) TestKprobeAttachWithKprobeEvents() { + t := s.T() cfg := config.New() cfg.AttachKprobesWithKprobeEventsABI = true @@ -1620,10 +1642,11 @@ func TestKprobeAttachWithKprobeEvents(t *testing.T) { require.True(t, ok) fmt.Printf("p_tcp_sendmsg_hits = %d\n", p_tcp_sendmsg) - assert.Greater(t, p_tcp_sendmsg, int64(0)) + assert.Greater(t, p_tcp_sendmsg, uint64(0)) } -func TestBlockingReadCounts(t *testing.T) { +func (s *TracerSuite) TestBlockingReadCounts() { + t := s.T() tr := setupTracer(t, testConfig()) server := NewTCPServer(func(c net.Conn) { c.Write([]byte("foo")) @@ -1657,7 +1680,8 @@ func TestBlockingReadCounts(t *testing.T) { assert.Equal(t, uint64(n), conn.Monotonic.RecvBytes) } -func TestTCPDirectionWithPreexistingConnection(t *testing.T) { +func (s *TracerSuite) TestTCPDirectionWithPreexistingConnection() { + t := s.T() wg := sync.WaitGroup{} // setup server to listen on a port @@ -1713,7 +1737,8 @@ func TestTCPDirectionWithPreexistingConnection(t *testing.T) { require.Equal(t, network.INCOMING, origConn[0].Direction, "original server<->client connection should have incoming direction") } -func TestPreexistingConnectionDirection(t *testing.T) { +func (s *TracerSuite) TestPreexistingConnectionDirection() { + t := s.T() // Start the client and server before we enable the system probe to test that the tracer picks // up the 
pre-existing connection @@ -1837,6 +1862,7 @@ func TestConntrackerFallback(t *testing.T) { cfg.EnableEbpfConntracker = false cfg.AllowNetlinkConntrackerFallback = true conntracker, err := newConntracker(cfg, nil) + // ensure we always clean up the conntracker, regardless of behavior if conntracker != nil { t.Cleanup(conntracker.Close) } @@ -1845,6 +1871,7 @@ func TestConntrackerFallback(t *testing.T) { cfg.AllowNetlinkConntrackerFallback = false conntracker, err = newConntracker(cfg, nil) + // ensure we always clean up the conntracker, regardless of behavior if conntracker != nil { t.Cleanup(conntracker.Close) } @@ -1854,9 +1881,6 @@ func TestConntrackerFallback(t *testing.T) { func testConfig() *config.Config { cfg := config.New() - if os.Getenv("BPF_DEBUG") != "" { - cfg.BPFDebug = true - } if ddconfig.IsECSFargate() { // protocol classification not yet supported on fargate cfg.ProtocolClassificationEnabled = false diff --git a/pkg/network/tracer/tracer_test.go b/pkg/network/tracer/tracer_test.go index 569b52f78b5f5..d89fa07eb6de7 100644 --- a/pkg/network/tracer/tracer_test.go +++ b/pkg/network/tracer/tracer_test.go @@ -30,10 +30,12 @@ import ( "github.com/miekg/dns" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" "golang.org/x/sync/errgroup" syscfg "github.com/DataDog/datadog-agent/cmd/system-probe/config" ddconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/driver" @@ -54,19 +56,20 @@ func TestMain(m *testing.M) { logLevel = "warn" } log.SetupLogger(seelog.Default, logLevel) - cfg := testConfig() - if cfg.EnableRuntimeCompiler { - fmt.Println("RUNTIME COMPILER ENABLED") - } - - if err := setKernelVersion(); err != nil { - fmt.Println("Failed to get kernel version, halting the tests", err) - 
os.Exit(1) - } - driver.Init(&syscfg.Config{}) + _ = driver.Init(&syscfg.Config{}) os.Exit(m.Run()) } +type TracerSuite struct { + suite.Suite +} + +func TestTracerSuite(t *testing.T) { + ebpftest.TestBuildModes(t, ebpftest.SupportedBuildModes(), "", func(t *testing.T) { + suite.Run(t, new(TracerSuite)) + }) +} + func isFentry() bool { fentryTests := os.Getenv("NETWORK_TRACER_FENTRY_TESTS") return fentryTests == "true" @@ -87,7 +90,8 @@ func setupTracer(t testing.TB, cfg *config.Config) *Tracer { return tr } -func TestGetStats(t *testing.T) { +func (s *TracerSuite) TestGetStats() { + t := s.T() httpSupported := httpSupported() linuxExpected := map[string]interface{}{} err := json.Unmarshal([]byte(`{ @@ -136,7 +140,6 @@ func TestGetStats(t *testing.T) { cfg := testConfig() cfg.EnableHTTPMonitoring = true cfg.EnableEbpfConntracker = enableEbpfConntracker - cfg.AllowPrecompiledFallback = true tr := setupTracer(t, cfg) <-time.After(time.Second) @@ -165,8 +168,8 @@ func TestGetStats(t *testing.T) { } } -func TestTCPSendAndReceive(t *testing.T) { - // Enable BPF-based system probe +func (s *TracerSuite) TestTCPSendAndReceive() { + t := s.T() tr := setupTracer(t, testConfig()) // Create TCP Server which, for every line, sends back a message with size=serverMessageSize @@ -222,7 +225,8 @@ func TestTCPSendAndReceive(t *testing.T) { assert.True(t, conn.IntraHost) } -func TestTCPShortLived(t *testing.T) { +func (s *TracerSuite) TestTCPShortLived() { + t := s.T() // Enable BPF-based system probe cfg := testConfig() cfg.TCPClosedTimeout = 10 * time.Millisecond @@ -275,7 +279,8 @@ func TestTCPShortLived(t *testing.T) { assert.False(t, ok) } -func TestTCPOverIPv6(t *testing.T) { +func (s *TracerSuite) TestTCPOverIPv6() { + t := s.T() t.SkipNow() cfg := testConfig() cfg.CollectTCPv6Conns = true @@ -331,7 +336,8 @@ func TestTCPOverIPv6(t *testing.T) { doneChan <- struct{}{} } -func TestTCPCollectionDisabled(t *testing.T) { +func (s *TracerSuite) TestTCPCollectionDisabled() { + t := 
s.T() if runtime.GOOS == "windows" { t.Skip("Test disabled on Windows") } @@ -372,7 +378,8 @@ func TestTCPCollectionDisabled(t *testing.T) { require.False(t, ok) } -func TestTCPConnsReported(t *testing.T) { +func (s *TracerSuite) TestTCPConnsReported() { + t := s.T() // Setup cfg := testConfig() cfg.CollectTCPv4Conns = true @@ -403,7 +410,8 @@ func TestTCPConnsReported(t *testing.T) { require.True(t, ok) } -func TestUDPSendAndReceive(t *testing.T) { +func (s *TracerSuite) TestUDPSendAndReceive() { + t := s.T() t.Run("v4", func(t *testing.T) { if !testConfig().CollectUDPv4Conns { t.Skip("UDPv4 disabled") @@ -480,7 +488,8 @@ func testUDPSendAndReceive(t *testing.T, addr string) { } } -func TestUDPDisabled(t *testing.T) { +func (s *TracerSuite) TestUDPDisabled() { + t := s.T() // Enable BPF-based system probe with UDP disabled cfg := testConfig() cfg.CollectUDPv4Conns = false @@ -519,7 +528,8 @@ func TestUDPDisabled(t *testing.T) { require.False(t, ok) } -func TestLocalDNSCollectionDisabled(t *testing.T) { +func (s *TracerSuite) TestLocalDNSCollectionDisabled() { + t := s.T() // Enable BPF-based system probe with DNS disabled (by default) config := testConfig() @@ -543,7 +553,8 @@ func TestLocalDNSCollectionDisabled(t *testing.T) { } } -func TestLocalDNSCollectionEnabled(t *testing.T) { +func (s *TracerSuite) TestLocalDNSCollectionEnabled() { + t := s.T() // Enable BPF-based system probe with DNS enabled cfg := testConfig() cfg.CollectLocalDNS = true @@ -576,7 +587,8 @@ func isLocalDNS(c network.ConnectionStats) bool { return c.Source.String() == "127.0.0.1" && c.Dest.String() == "127.0.0.1" && c.DPort == 53 } -func TestShouldSkipExcludedConnection(t *testing.T) { +func (s *TracerSuite) TestShouldSkipExcludedConnection() { + t := s.T() // exclude connections from 127.0.0.1:80 cfg := testConfig() // exclude source SSH connections to make this pass in VM @@ -614,7 +626,8 @@ func TestShouldSkipExcludedConnection(t *testing.T) { }, "Unable to find UDP connection to 
127.0.0.1:80") } -func TestShouldExcludeEmptyStatsConnection(t *testing.T) { +func (s *TracerSuite) TestShouldExcludeEmptyStatsConnection() { + t := s.T() cfg := testConfig() tr := setupTracer(t, cfg) @@ -1072,19 +1085,21 @@ func testDNSStats(t *testing.T, domain string, success int, failure int, timeout assert.Equal(t, uint32(timeout), timeouts) } -func TestDNSStatsForValidDomain(t *testing.T) { - testDNSStats(t, "golang.org", 1, 0, 0, validDNSServer) -} - -func TestDNSStatsForInvalidDomain(t *testing.T) { - testDNSStats(t, "abcdedfg", 0, 1, 0, validDNSServer) -} - -func TestDNSStatsForTimeout(t *testing.T) { - testDNSStats(t, "golang.org", 0, 0, 1, "1.2.3.4") +func (s *TracerSuite) TestDNSStats() { + t := s.T() + t.Run("valid domain", func(t *testing.T) { + testDNSStats(t, "golang.org", 1, 0, 0, validDNSServer) + }) + t.Run("invalid domain", func(t *testing.T) { + testDNSStats(t, "abcdedfg", 0, 1, 0, validDNSServer) + }) + t.Run("timeout", func(t *testing.T) { + testDNSStats(t, "golang.org", 0, 0, 1, "1.2.3.4") + }) } -func TestTCPEstablished(t *testing.T) { +func (s *TracerSuite) TestTCPEstablished() { + t := s.T() // Ensure closed connections are flushed as soon as possible cfg := testConfig() cfg.TCPClosedTimeout = 500 * time.Millisecond @@ -1122,7 +1137,8 @@ func TestTCPEstablished(t *testing.T) { assert.Equal(t, uint32(1), conn.Last.TCPClosed) } -func TestTCPEstablishedPreExistingConn(t *testing.T) { +func (s *TracerSuite) TestTCPEstablishedPreExistingConn() { + t := s.T() server := NewTCPServer(func(c net.Conn) { io.Copy(io.Discard, c) c.Close() @@ -1153,7 +1169,8 @@ func TestTCPEstablishedPreExistingConn(t *testing.T) { assert.Equal(t, uint32(1), m.TCPClosed) } -func TestUnconnectedUDPSendIPv4(t *testing.T) { +func (s *TracerSuite) TestUnconnectedUDPSendIPv4() { + t := s.T() cfg := testConfig() tr := setupTracer(t, cfg) @@ -1176,7 +1193,8 @@ func TestUnconnectedUDPSendIPv4(t *testing.T) { assert.Equal(t, bytesSent, int(outgoing[0].Monotonic.SentBytes)) } 
-func TestConnectedUDPSendIPv6(t *testing.T) { +func (s *TracerSuite) TestConnectedUDPSendIPv6() { + t := s.T() cfg := testConfig() if !testConfig().CollectUDPv6Conns { t.Skip("UDPv6 disabled") @@ -1202,7 +1220,8 @@ func TestConnectedUDPSendIPv6(t *testing.T) { assert.Equal(t, bytesSent, int(outgoing[0].Monotonic.SentBytes)) } -func TestConnectionClobber(t *testing.T) { +func (s *TracerSuite) TestConnectionClobber() { + t := s.T() cfg := testConfig() cfg.CollectUDPv4Conns = false cfg.CollectUDPv6Conns = false @@ -1322,7 +1341,8 @@ func TestConnectionClobber(t *testing.T) { assert.Equal(t, preCap, tr.activeBuffer.Capacity()) } -func TestTCPDirection(t *testing.T) { +func (s *TracerSuite) TestTCPDirection() { + t := s.T() cfg := testConfig() tr := setupTracer(t, cfg) diff --git a/pkg/network/tracer/tracer_usm_linux_test.go b/pkg/network/tracer/tracer_usm_linux_test.go index 1897706e615fb..a9da4f0e8b207 100644 --- a/pkg/network/tracer/tracer_usm_linux_test.go +++ b/pkg/network/tracer/tracer_usm_linux_test.go @@ -26,9 +26,13 @@ import ( "testing" "time" + "github.com/DataDog/gopsutil/host" + krpretty "github.com/kr/pretty" "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" "golang.org/x/sys/unix" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/config" javatestutil "github.com/DataDog/datadog-agent/pkg/network/java/testutil" @@ -72,7 +76,18 @@ func classificationSupported(config *config.Config) bool { return kprobe.ClassificationSupported(config) } -func TestEnableHTTPMonitoring(t *testing.T) { +type USMSuite struct { + suite.Suite +} + +func TestUSMSuite(t *testing.T) { + ebpftest.TestBuildModes(t, []ebpftest.BuildMode{ebpftest.Prebuilt, ebpftest.RuntimeCompiled, ebpftest.CORE}, "", func(t *testing.T) { + suite.Run(t, new(USMSuite)) + }) +} + +func (s *USMSuite) TestEnableHTTPMonitoring() { + t := s.T() if !httpSupported() { t.Skip("HTTP 
monitoring not supported") } @@ -82,7 +97,8 @@ func TestEnableHTTPMonitoring(t *testing.T) { _ = setupTracer(t, cfg) } -func TestHTTPStats(t *testing.T) { +func (s *USMSuite) TestHTTPStats() { + t := s.T() t.Run("status code", func(t *testing.T) { testHTTPStats(t, true) }) @@ -140,7 +156,8 @@ func testHTTPStats(t *testing.T, aggregateByStatusCode bool) { }, 3*time.Second, 10*time.Millisecond, "couldn't find http connection matching: %s", serverAddr) } -func TestHTTPSViaLibraryIntegration(t *testing.T) { +func (s *USMSuite) TestHTTPSViaLibraryIntegration() { + t := s.T() if !httpsSupported() { t.Skip("HTTPS feature not available/supported for this setup") } @@ -271,59 +288,97 @@ func testHTTPSLibrary(t *testing.T, fetchCmd []string, prefetchLibs []string) { cfg.CollectTCPv4Conns = true cfg.CollectTCPv6Conns = true tr := setupTracer(t, cfg) - fentryTracerEnabled := tr.ebpfTracer.Type() == connection.TracerTypeFentry // not ideal but, short process are hard to catch for _, lib := range prefetchLibs { prefetchLib(t, lib) } - time.Sleep(time.Second) + time.Sleep(2 * time.Second) // Issue request using fetchCmd (wget, curl, ...) // This is necessary (as opposed to using net/http) because we want to // test a HTTP client linked to OpenSSL or GnuTLS const targetURL = "https://127.0.0.1:443/200/foobar" cmd := append(fetchCmd, targetURL) - requestCmd := exec.Command(cmd[0], cmd[1:]...) - out, err := requestCmd.CombinedOutput() - require.NoErrorf(t, err, "failed to issue request via %s: %s\n%s", fetchCmd, err, string(out)) + t.Log("run 3 clients request as we can have a race between the closing tcp socket and the http response") + fetchPids := make(map[uint32]struct{}) + for i := 0; i < 3; i++ { + requestCmd := exec.Command(cmd[0], cmd[1:]...) 
+ out, err := requestCmd.CombinedOutput() + require.NoErrorf(t, err, "failed to issue request via %s: %s\n%s", fetchCmd, err, string(out)) + fetchPid := uint32(requestCmd.Process.Pid) + fetchPids[fetchPid] = struct{}{} + t.Logf("%s pid %d", cmd[0], fetchPid) + } + + var allConnections []network.ConnectionStats + httpKeys := make(map[uint16]http.Key) require.Eventuallyf(t, func() bool { payload := getConnections(t, tr) + allConnections = append(allConnections, payload.Conns...) + found := false for key, stats := range payload.HTTP { + if key.Path.Content != "/200/foobar" { + continue + } req, exists := stats.Data[200] if !exists { - continue + t.Errorf("http %# v stats %# v", krpretty.Formatter(key), krpretty.Formatter(stats)) + return false } statsTags := req.StaticTags // debian 10 have curl binary linked with openssl and gnutls but use only openssl during tls query (there no runtime flag available) // this make harder to map lib and tags, one set of tag should match but not both - if key.Path.Content == "/200/foobar" && (statsTags == network.ConnTagGnuTLS || statsTags == network.ConnTagOpenSSL) { + if statsTags == network.ConnTagGnuTLS || statsTags == network.ConnTagOpenSSL { t.Logf("found tag 0x%x %s", statsTags, network.GetStaticTags(statsTags)) - - // socket filter is not supported on fentry tracer - if fentryTracerEnabled { - // so we return early if the test was successful until now - return true - } - - for _, c := range payload.Conns { - if c.SPort == key.SrcPort && c.DPort == key.DstPort && c.ProtocolStack.Contains(protocols.TLS) { - return true - } - } - t.Logf("HTTP connection %v doesn't contain ConnTagTLS\n", key) + httpKeys[key.SrcPort] = key + found = true + continue + } else { + s, _ := tr.getStats(allStats...) 
+ t.Logf("==== %# v\n%# v", krpretty.Formatter(req), krpretty.Formatter(s)) } - t.Logf("HTTP stat didn't match criteria %v tags 0x%x\n", key, statsTags) - for _, c := range payload.Conns { - possibleKeyTuples := network.ConnectionKeysFromConnectionStats(c) - t.Logf("conn sport %d dport %d tags %x staticTags %x connKey [%v] or [%v]\n", c.SPort, c.DPort, c.Tags, c.StaticTags, possibleKeyTuples[0], possibleKeyTuples[1]) + if len(httpKeys) == 3 { + return true } + t.Logf("HTTP stat didn't match criteria %v tags 0x%x\n", key, statsTags) } - return false - }, 10*time.Second, 1*time.Second, "couldn't find HTTPS stats") + if !found { + s, _ := tr.getStats(allStats...) + t.Logf("=====loop= %# v", krpretty.Formatter(s)) + } + return found + }, 15*time.Second, 5*time.Second, "couldn't find USM HTTPS stats") + + // check NPM static TLS tag + found := false + for _, c := range allConnections { + httpKey, foundKey := httpKeys[c.SPort] + if !foundKey { + continue + } + _, foundPid := fetchPids[c.Pid] + if foundPid && c.DPort == httpKey.DstPort && c.ProtocolStack.Contains(protocols.TLS) { + found = true + break + } + } + if !found { + t.Errorf("NPM TLS tag not found") + for _, c := range allConnections { + httpKey, foundKey := httpKeys[c.SPort] + if !foundKey { + continue + } + _, foundPid := fetchPids[c.Pid] + if foundPid { + t.Logf("pid %d connection %# v \nhttp %# v\n", c.Pid, krpretty.Formatter(c), krpretty.Formatter(httpKey)) + } + } + } } const ( @@ -331,7 +386,8 @@ const ( ) // TestOpenSSLVersions setups a HTTPs python server, and makes sure we are able to capture all traffic. 
-func TestOpenSSLVersions(t *testing.T) { +func (s *USMSuite) TestOpenSSLVersions() { + t := s.T() if !httpsSupported() { t.Skip("HTTPS feature not available/supported for this setup") } @@ -391,7 +447,8 @@ func TestOpenSSLVersions(t *testing.T) { // Unfortunately, this is only a best-effort mechanism and it relies on some assumptions that are not always necessarily true // such as having SSL_read/SSL_write calls in the same call-stack/execution-context as the kernel function tcp_sendmsg. Force // this is reason the fallback behavior may require a few warmup requests before we start capturing traffic. -func TestOpenSSLVersionsSlowStart(t *testing.T) { +func (s *USMSuite) TestOpenSSLVersionsSlowStart() { + t := s.T() if !httpsSupported() { t.Skip("HTTPS feature not available/supported for this setup") } @@ -517,7 +574,8 @@ func isRequestIncluded(allStats map[http.Key]*http.RequestStats, req *nethttp.Re return false } -func TestProtocolClassification(t *testing.T) { +func (s *USMSuite) TestProtocolClassification() { + t := s.T() cfg := testConfig() if !classificationSupported(cfg) { t.Skip("Classification is not supported") @@ -637,7 +695,8 @@ func createJavaTempFile(t *testing.T, dir string) string { return tempfile.Name() } -func TestJavaInjection(t *testing.T) { +func (s *USMSuite) TestJavaInjection() { + t := s.T() if !httpsSupported() { t.Skip("java TLS not supported on the current platform") } @@ -859,84 +918,46 @@ func TestJavaInjection(t *testing.T) { } } -// GoTLS test func TestHTTPGoTLSAttachProbes(t *testing.T) { - if !goTLSSupported() { - t.Skip("GoTLS not supported for this setup") - } - - t.Run("runtime compilation", func(t *testing.T) { - cfg := testConfig() - cfg.EnableRuntimeCompiler = true - cfg.AllowPrecompiledFallback = false - cfg.EnableCORE = false + modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} + ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { + if !goTLSSupported() { + t.Skip("GoTLS not supported for this 
setup") + } + info, err := host.Info() + require.NoError(t, err) + // TODO fix TestHTTPGoTLSAttachProbes on these Fedora versions + if info.Platform == "fedora" && (info.PlatformVersion == "36" || info.PlatformVersion == "37") { + // TestHTTPGoTLSAttachProbes fails consistently in CI on Fedora 36,37 + t.Skip("TestHTTPGoTLSAttachProbes fails on this OS consistently") + } t.Run("new process", func(t *testing.T) { - testHTTPGoTLSCaptureNewProcess(t, cfg) + testHTTPGoTLSCaptureNewProcess(t, config.New()) }) t.Run("already running process", func(t *testing.T) { - testHTTPGoTLSCaptureAlreadyRunning(t, cfg) + testHTTPGoTLSCaptureAlreadyRunning(t, config.New()) }) }) +} - t.Run("CO-RE", func(t *testing.T) { - // note: this is a bit of hack since CI runs an entire package either as - // runtime, CO-RE, or pre-built. here we're piggybacking on the runtime pass - // and running the CO-RE tests as well - cfg := testConfig() - cfg.EnableCORE = true - cfg.EnableRuntimeCompiler = false - cfg.AllowRuntimeCompiledFallback = false +func TestHTTPSGoTLSAttachProbesOnContainer(t *testing.T) { + t.Skip("Skipping a flaky test") + modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} + ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { + if !goTLSSupported() { + t.Skip("GoTLS not supported for this setup") + } t.Run("new process", func(t *testing.T) { - testHTTPGoTLSCaptureNewProcess(t, cfg) + testHTTPsGoTLSCaptureNewProcessContainer(t, config.New()) }) t.Run("already running process", func(t *testing.T) { - testHTTPGoTLSCaptureAlreadyRunning(t, cfg) + testHTTPsGoTLSCaptureAlreadyRunningContainer(t, config.New()) }) }) } -func TestHTTPSGoTLSAttachProbesOnContainer(t *testing.T) { - t.Skip("Skipping a flaky test") - if !goTLSSupported() { - t.Skip("GoTLS not supported for this setup") - } - - t.Run("new process (runtime compilation)", func(t *testing.T) { - cfg := config.New() - cfg.EnableRuntimeCompiler = true - cfg.EnableCORE = false - 
testHTTPsGoTLSCaptureNewProcessContainer(t, cfg) - }) - - t.Run("already running process (runtime compilation)", func(t *testing.T) { - cfg := config.New() - cfg.EnableRuntimeCompiler = true - cfg.EnableCORE = false - testHTTPsGoTLSCaptureAlreadyRunningContainer(t, cfg) - }) - - // note: this is a bit of hack since CI runs an entire package either as - // runtime, CO-RE, or pre-built. here we're piggybacking on the runtime pass - // and running the CO-RE tests as well - t.Run("new process (co-re)", func(t *testing.T) { - cfg := config.New() - cfg.EnableCORE = true - cfg.EnableRuntimeCompiler = false - cfg.AllowRuntimeCompiledFallback = false - testHTTPsGoTLSCaptureNewProcessContainer(t, cfg) - }) - - t.Run("already running process (co-re)", func(t *testing.T) { - cfg := config.New() - cfg.EnableCORE = true - cfg.EnableRuntimeCompiler = false - cfg.AllowRuntimeCompiledFallback = false - testHTTPsGoTLSCaptureAlreadyRunningContainer(t, cfg) - }) -} - // Test that we can capture HTTPS traffic from Go processes started after the // tracer. 
func testHTTPGoTLSCaptureNewProcess(t *testing.T, cfg *config.Config) { @@ -1081,7 +1102,8 @@ type tlsTestCommand struct { } // TLS classification tests -func TestTLSClassification(t *testing.T) { +func (s *USMSuite) TestTLSClassification() { + t := s.T() cfg := testConfig() cfg.ProtocolClassificationEnabled = true cfg.CollectTCPv4Conns = true @@ -1267,20 +1289,45 @@ func testHTTPSClassification(t *testing.T, tr *Tracer, clientHost, targetHost, s require.NoError(t, err) t.Cleanup(closer) }, - postTracerSetup: func(t *testing.T, ctx testContext) { + validation: func(t *testing.T, ctx testContext, tr *Tracer) { client := nethttp.Client{ Transport: &nethttp.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, DialContext: defaultDialer.DialContext, }, } - resp, err := client.Get(fmt.Sprintf("https://%s/200/request-1", ctx.targetAddress)) - require.NoError(t, err) - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - client.CloseIdleConnections() + + // Ensure that we see HTTPS requests being traced *before* the actual test assertions + // This is done to reduce test flakiness due to uprobe attachment delays + require.Eventually(t, func() bool { + resp, err := client.Get(fmt.Sprintf("https://%s/200/warm-up", ctx.targetAddress)) + if err != nil { + return false + } + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + + httpData := getConnections(t, tr).HTTP + for httpKey := range httpData { + if httpKey.Path.Content == resp.Request.URL.Path { + return true + } + } + + return false + }, 5*time.Second, 100*time.Millisecond, "couldn't detect HTTPS traffic being traced (test setup validation)") + + t.Log("run 3 clients request as we can have a race between the closing tcp socket and the http response") + for i := 0; i < 3; i++ { + resp, err := client.Get(fmt.Sprintf("https://%s/200/request-1", ctx.targetAddress)) + require.NoError(t, err) + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + 
client.CloseIdleConnections() + } + + waitForConnectionsWithProtocol(t, tr, ctx.targetAddress, ctx.serverAddress, protocols.HTTP, tlsExpected) }, - validation: validateProtocolConnection(protocols.HTTP, tlsExpected), }, } for _, tt := range tests { diff --git a/pkg/network/tracer/tracer_windows_test.go b/pkg/network/tracer/tracer_windows_test.go index 34f4bdabd0ec0..eebda2ee2c43e 100644 --- a/pkg/network/tracer/tracer_windows_test.go +++ b/pkg/network/tracer/tracer_windows_test.go @@ -11,10 +11,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/network/config" ) -func setKernelVersion() error { - return nil -} - func httpSupported() bool { return false } diff --git a/pkg/network/usm/compile_test.go b/pkg/network/usm/compile_test.go index 13a8374d3b5be..b922e099a9f85 100644 --- a/pkg/network/usm/compile_test.go +++ b/pkg/network/usm/compile_test.go @@ -12,20 +12,23 @@ import ( "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/protocols/http" "github.com/DataDog/datadog-agent/pkg/util/kernel" ) func TestHttpCompile(t *testing.T) { - currKernelVersion, err := kernel.HostVersion() - require.NoError(t, err) - if currKernelVersion < http.MinimumKernelVersion { - t.Skip("USM Runtime compilation not supported on this kernel version") - } - cfg := config.New() - cfg.BPFDebug = true - out, err := getRuntimeCompiledUSM(cfg) - require.NoError(t, err) - _ = out.Close() + ebpftest.TestBuildMode(t, ebpftest.RuntimeCompiled, "", func(t *testing.T) { + currKernelVersion, err := kernel.HostVersion() + require.NoError(t, err) + if currKernelVersion < http.MinimumKernelVersion { + t.Skip("USM Runtime compilation not supported on this kernel version") + } + cfg := config.New() + cfg.BPFDebug = true + out, err := getRuntimeCompiledUSM(cfg) + require.NoError(t, err) + _ = out.Close() + }) } diff --git a/pkg/network/usm/ebpf_gotls.go 
b/pkg/network/usm/ebpf_gotls.go index 45f1ec8f9fe2c..e11a12778e147 100644 --- a/pkg/network/usm/ebpf_gotls.go +++ b/pkg/network/usm/ebpf_gotls.go @@ -202,7 +202,7 @@ func (p *GoTLSProgram) ConfigureManager(m *errtelemetry.Manager) { func (p *GoTLSProgram) ConfigureOptions(options *manager.Options) { options.MapSpecEditors[connectionTupleByGoTLSMap] = manager.MapSpecEditor{ Type: ebpf.Hash, - MaxEntries: uint32(p.cfg.MaxTrackedConnections), + MaxEntries: p.cfg.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries, } } diff --git a/pkg/network/usm/ebpf_javatls.go b/pkg/network/usm/ebpf_javatls.go index 98d7f620324ed..c08cff6e14663 100644 --- a/pkg/network/usm/ebpf_javatls.go +++ b/pkg/network/usm/ebpf_javatls.go @@ -20,6 +20,8 @@ import ( "github.com/cilium/ebpf" + manager "github.com/DataDog/ebpf-manager" + "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/java" "github.com/DataDog/datadog-agent/pkg/network/protocols/http" @@ -27,7 +29,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/process/monitor" "github.com/DataDog/datadog-agent/pkg/process/util" "github.com/DataDog/datadog-agent/pkg/util/log" - manager "github.com/DataDog/ebpf-manager" ) const ( @@ -132,7 +133,7 @@ func (p *JavaTLSProgram) ConfigureManager(m *nettelemetry.Manager) { func (p *JavaTLSProgram) ConfigureOptions(options *manager.Options) { options.MapSpecEditors[javaTLSConnectionsMap] = manager.MapSpecEditor{ Type: ebpf.Hash, - MaxEntries: uint32(p.cfg.MaxTrackedConnections), + MaxEntries: p.cfg.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries, } options.ActivatedProbes = append(options.ActivatedProbes, diff --git a/pkg/network/usm/ebpf_main.go b/pkg/network/usm/ebpf_main.go index 323a7eea8caab..1200676d2417a 100644 --- a/pkg/network/usm/ebpf_main.go +++ b/pkg/network/usm/ebpf_main.go @@ -357,22 +357,22 @@ func (e *ebpfProgram) init(buf bytecode.AssetReader, options manager.Options) er options.MapSpecEditors = 
map[string]manager.MapSpecEditor{ httpInFlightMap: { Type: ebpf.Hash, - MaxEntries: uint32(e.cfg.MaxTrackedConnections), + MaxEntries: e.cfg.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries, }, http2InFlightMap: { Type: ebpf.Hash, - MaxEntries: uint32(e.cfg.MaxTrackedConnections), + MaxEntries: e.cfg.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries, }, connectionStatesMap: { Type: ebpf.Hash, - MaxEntries: uint32(e.cfg.MaxTrackedConnections), + MaxEntries: e.cfg.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries, }, kafkaLastTCPSeqPerConnectionMap: { Type: ebpf.Hash, - MaxEntries: uint32(e.cfg.MaxTrackedConnections), + MaxEntries: e.cfg.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries, }, } @@ -384,7 +384,7 @@ func (e *ebpfProgram) init(buf bytecode.AssetReader, options manager.Options) er } else { options.MapSpecEditors[probes.ConnectionProtocolMap] = manager.MapSpecEditor{ Type: ebpf.Hash, - MaxEntries: uint32(e.cfg.MaxTrackedConnections), + MaxEntries: e.cfg.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries, } } diff --git a/pkg/network/usm/ebpf_ssl.go b/pkg/network/usm/ebpf_ssl.go index 55a31cfb52828..b5a04a3031861 100644 --- a/pkg/network/usm/ebpf_ssl.go +++ b/pkg/network/usm/ebpf_ssl.go @@ -258,7 +258,7 @@ func (o *sslProgram) ConfigureManager(m *errtelemetry.Manager) { func (o *sslProgram) ConfigureOptions(options *manager.Options) { options.MapSpecEditors[sslSockByCtxMap] = manager.MapSpecEditor{ Type: ebpf.Hash, - MaxEntries: uint32(o.cfg.MaxTrackedConnections), + MaxEntries: o.cfg.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries, } diff --git a/pkg/network/usm/kafka_monitor_test.go b/pkg/network/usm/kafka_monitor_test.go index 3ec284f2f500f..65fe8717323ab 100644 --- a/pkg/network/usm/kafka_monitor_test.go +++ b/pkg/network/usm/kafka_monitor_test.go @@ -21,6 +21,7 @@ import ( "github.com/twmb/franz-go/pkg/kgo" "github.com/twmb/franz-go/pkg/kversion" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" 
"github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/protocols/http" "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" @@ -28,14 +29,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/kernel" ) -type BinaryType int - -const ( - PREBUILT = 0 - RUNTIME = 1 - CORE = 2 -) - const ( kafkaPort = "9092" ) @@ -84,6 +77,10 @@ func skipTestIfKernelNotSupported(t *testing.T) { } func TestKafkaProtocolParsing(t *testing.T) { + ebpftest.TestBuildModes(t, []ebpftest.BuildMode{ebpftest.Prebuilt, ebpftest.RuntimeCompiled, ebpftest.CORE}, "", testKafkaProtocolParsing) +} + +func testKafkaProtocolParsing(t *testing.T) { skipTestIfKernelNotSupported(t) clientHost := "localhost" @@ -462,33 +459,22 @@ func newHTTPWithKafkaMonitor(t *testing.T, cfg *config.Config) *Monitor { func TestLoadKafkaBinary(t *testing.T) { skipTestIfKernelNotSupported(t) - for mode, debug := range map[string]bool{"debug": true, "release": false} { - for runType, val := range map[string]BinaryType{"CORE": CORE, "RUNTIME": RUNTIME, "PREBUILT": PREBUILT} { - t.Run(fmt.Sprintf("%s %s binary", runType, mode), func(t *testing.T) { - loadKafkaBinary(t, debug, val) - }) - } - } + ebpftest.TestBuildModes(t, []ebpftest.BuildMode{ebpftest.Prebuilt, ebpftest.RuntimeCompiled, ebpftest.CORE}, "", func(t *testing.T) { + t.Run("debug", func(t *testing.T) { + loadKafkaBinary(t, true) + }) + t.Run("release", func(t *testing.T) { + loadKafkaBinary(t, false) + }) + }) } -func loadKafkaBinary(t *testing.T, debug bool, binaryType BinaryType) { +func loadKafkaBinary(t *testing.T, debug bool) { cfg := config.New() // We don't have a way of enabling kafka without http at the moment cfg.EnableHTTPMonitoring = true cfg.EnableKafkaMonitoring = true cfg.BPFDebug = debug - cfg.AllowPrecompiledFallback = false - cfg.AllowRuntimeCompiledFallback = false - cfg.EnableCORE = false - switch binaryType { - case PREBUILT: - cfg.AllowPrecompiledFallback = true - case RUNTIME: 
- cfg.AllowRuntimeCompiledFallback = true - case CORE: - cfg.EnableCORE = true - } - newHTTPWithKafkaMonitor(t, cfg) } diff --git a/pkg/network/usm/monitor.go b/pkg/network/usm/monitor.go index 8db52d31821eb..26010707acc3a 100644 --- a/pkg/network/usm/monitor.go +++ b/pkg/network/usm/monitor.go @@ -264,8 +264,9 @@ func (m *Monitor) GetHTTPStats() map[http.Key]*http.RequestStats { return nil } + defer m.httpTelemetry.Log() + m.httpConsumer.Sync() - m.httpTelemetry.Log() return m.httpStatkeeper.GetAndResetAllStats() } @@ -276,8 +277,9 @@ func (m *Monitor) GetHTTP2Stats() map[http.Key]*http.RequestStats { return nil } + defer m.http2Telemetry.Log() + m.http2Consumer.Sync() - m.http2Telemetry.Log() return m.http2Statkeeper.GetAndResetAllStats() } @@ -287,8 +289,9 @@ func (m *Monitor) GetKafkaStats() map[kafka.Key]*kafka.RequestStat { return nil } + defer m.kafkaTelemetry.Log() + m.kafkaConsumer.Sync() - m.kafkaTelemetry.Log() return m.kafkaStatkeeper.GetAndResetAllStats() } diff --git a/pkg/network/usm/monitor_test.go b/pkg/network/usm/monitor_test.go index a58785c5922cf..f2ef7bb051375 100644 --- a/pkg/network/usm/monitor_test.go +++ b/pkg/network/usm/monitor_test.go @@ -25,7 +25,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/network/config" netlink "github.com/DataDog/datadog-agent/pkg/network/netlink/testutil" "github.com/DataDog/datadog-agent/pkg/network/protocols/http" @@ -41,7 +43,18 @@ var ( emptyBody = []byte(nil) ) -func TestHTTPMonitorCaptureRequestMultipleTimes(t *testing.T) { +type HTTPTestSuite struct { + suite.Suite +} + +func TestHTTP(t *testing.T) { + ebpftest.TestBuildModes(t, []ebpftest.BuildMode{ebpftest.Prebuilt, ebpftest.RuntimeCompiled, ebpftest.CORE}, "", func(t *testing.T) { + suite.Run(t, new(HTTPTestSuite)) + }) +} + +func (s *HTTPTestSuite) 
TestHTTPMonitorCaptureRequestMultipleTimes() { + t := s.T() monitor := newHTTPMonitor(t) serverAddr := "localhost:8081" srvDoneFn := testutil.HTTPServer(t, serverAddr, testutil.Options{}) @@ -71,7 +84,8 @@ func TestHTTPMonitorCaptureRequestMultipleTimes(t *testing.T) { // TestHTTPMonitorLoadWithIncompleteBuffers sends thousands of requests without getting responses for them, in parallel // we send another request. We expect to capture the another request but not the incomplete requests. -func TestHTTPMonitorLoadWithIncompleteBuffers(t *testing.T) { +func (s *HTTPTestSuite) TestHTTPMonitorLoadWithIncompleteBuffers() { + t := s.T() monitor := newHTTPMonitor(t) slowServerAddr := "localhost:8080" @@ -121,7 +135,8 @@ func TestHTTPMonitorLoadWithIncompleteBuffers(t *testing.T) { require.True(t, foundFastReq) } -func TestHTTPMonitorIntegrationWithResponseBody(t *testing.T) { +func (s *HTTPTestSuite) TestHTTPMonitorIntegrationWithResponseBody() { + t := s.T() targetAddr := "localhost:8080" serverAddr := "localhost:8080" @@ -178,7 +193,8 @@ func TestHTTPMonitorIntegrationWithResponseBody(t *testing.T) { } } -func TestHTTPMonitorIntegrationSlowResponse(t *testing.T) { +func (s *HTTPTestSuite) TestHTTPMonitorIntegrationSlowResponse() { + t := s.T() targetAddr := "localhost:8080" serverAddr := "localhost:8080" @@ -243,7 +259,8 @@ func TestHTTPMonitorIntegrationSlowResponse(t *testing.T) { } } -func TestHTTPMonitorIntegration(t *testing.T) { +func (s *HTTPTestSuite) TestHTTPMonitorIntegration() { + t := s.T() targetAddr := "localhost:8080" serverAddr := "localhost:8080" @@ -259,7 +276,8 @@ func TestHTTPMonitorIntegration(t *testing.T) { }) } -func TestHTTPMonitorIntegrationWithNAT(t *testing.T) { +func (s *HTTPTestSuite) TestHTTPMonitorIntegrationWithNAT() { + t := s.T() // SetupDNAT sets up a NAT translation from 2.2.2.2 to 1.1.1.1 netlink.SetupDNAT(t) @@ -277,7 +295,8 @@ func TestHTTPMonitorIntegrationWithNAT(t *testing.T) { }) } -func TestUnknownMethodRegression(t 
*testing.T) { +func (s *HTTPTestSuite) TestUnknownMethodRegression() { + t := s.T() monitor := newHTTPMonitor(t) // SetupDNAT sets up a NAT translation from 2.2.2.2 to 1.1.1.1 @@ -306,7 +325,8 @@ func TestUnknownMethodRegression(t *testing.T) { } } -func TestRSTPacketRegression(t *testing.T) { +func (s *HTTPTestSuite) TestRSTPacketRegression() { + t := s.T() monitor := newHTTPMonitor(t) serverAddr := "127.0.0.1:8080" @@ -339,7 +359,8 @@ func TestRSTPacketRegression(t *testing.T) { includesRequest(t, stats, &nethttp.Request{URL: url}) } -func TestKeepAliveWithIncompleteResponseRegression(t *testing.T) { +func (s *HTTPTestSuite) TestKeepAliveWithIncompleteResponseRegression() { + t := s.T() monitor := newHTTPMonitor(t) const req = "GET /200/foobar HTTP/1.1\n" diff --git a/pkg/network/usm/shared_libraries_test.go b/pkg/network/usm/shared_libraries_test.go index 2f7a882b31828..a9577dd458487 100644 --- a/pkg/network/usm/shared_libraries_test.go +++ b/pkg/network/usm/shared_libraries_test.go @@ -18,6 +18,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/suite" "go.uber.org/atomic" manager "github.com/DataDog/ebpf-manager" @@ -26,6 +27,7 @@ import ( "golang.org/x/sys/unix" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/network/config" netebpf "github.com/DataDog/datadog-agent/pkg/network/ebpf" "github.com/DataDog/datadog-agent/pkg/network/ebpf/probes" @@ -43,7 +45,18 @@ func registerProcessTerminationUponCleanup(t *testing.T, cmd *exec.Cmd) { }) } -func TestSharedLibraryDetection(t *testing.T) { +type SharedLibrarySuite struct { + suite.Suite +} + +func TestSharedLibrary(t *testing.T) { + ebpftest.TestBuildModes(t, []ebpftest.BuildMode{ebpftest.Prebuilt, ebpftest.RuntimeCompiled, ebpftest.CORE}, "", func(t *testing.T) { + suite.Run(t, new(SharedLibrarySuite)) + }) +} + +func (s *SharedLibrarySuite) TestSharedLibraryDetection() { + t := s.T() perfHandler := 
initEBPFProgram(t) fooPath1, fooPathID1 := createTempTestFile(t, "foo.so") @@ -92,7 +105,8 @@ func TestSharedLibraryDetection(t *testing.T) { }, time.Second*10, time.Second, "") } -func TestSharedLibraryDetectionWithPIDAndRootNameSpace(t *testing.T) { +func (s *SharedLibrarySuite) TestSharedLibraryDetectionWithPIDandRootNameSpace() { + t := s.T() _, err := os.Stat("/usr/bin/busybox") if err != nil { t.Skip("skip for the moment as some distro are not friendly with busybox package") @@ -151,7 +165,8 @@ func TestSharedLibraryDetectionWithPIDAndRootNameSpace(t *testing.T) { require.Error(t, err) } -func TestSameInodeRegression(t *testing.T) { +func (s *SharedLibrarySuite) TestSameInodeRegression() { + t := s.T() perfHandler := initEBPFProgram(t) fooPath1, fooPathID1 := createTempTestFile(t, "a-foo.so") @@ -203,7 +218,8 @@ func TestSameInodeRegression(t *testing.T) { }, time.Second*10, time.Second, "") } -func TestSoWatcherLeaks(t *testing.T) { +func (s *SharedLibrarySuite) TestSoWatcherLeaks() { + t := s.T() perfHandler := initEBPFProgram(t) fooPath1, fooPathID1 := createTempTestFile(t, "foo.so") @@ -283,7 +299,8 @@ func TestSoWatcherLeaks(t *testing.T) { checkWatcherStateIsClean(t, watcher) } -func TestSoWatcherProcessAlreadyHoldingReferences(t *testing.T) { +func (s *SharedLibrarySuite) TestSoWatcherProcessAlreadyHoldingReferences() { + t := s.T() perfHandler := initEBPFProgram(t) fooPath1, fooPathID1 := createTempTestFile(t, "foo.so") diff --git a/pkg/otlp/internal/serializerexporter/consumer.go b/pkg/otlp/internal/serializerexporter/consumer.go index ee2aca80b4aca..32b060378ee8a 100644 --- a/pkg/otlp/internal/serializerexporter/consumer.go +++ b/pkg/otlp/internal/serializerexporter/consumer.go @@ -121,6 +121,20 @@ func (c *serializerConsumer) addTelemetryMetric(hostname string) { }) } +// addRuntimeTelemetryMetric to know if an Agent is using OTLP runtime metrics. 
+func (c *serializerConsumer) addRuntimeTelemetryMetric(hostname string, languageTags []string) { + for _, lang := range languageTags { + c.series = append(c.series, &metrics.Serie{ + Name: "datadog.agent.otlp.runtime_metrics", + Points: []metrics.Point{{Value: 1, Ts: float64(time.Now().Unix())}}, + Tags: tagset.CompositeTagsFromSlice([]string{fmt.Sprintf("language:%v", lang)}), + Host: hostname, + MType: metrics.APIGaugeType, + SourceTypeName: "System", + }) + } +} + // Send exports all data recorded by the consumer. It does not reset the consumer. func (c *serializerConsumer) Send(s serializer.MetricSerializer) error { var serieErr, sketchesErr error diff --git a/pkg/otlp/internal/serializerexporter/exporter.go b/pkg/otlp/internal/serializerexporter/exporter.go index 7019733feadf1..d0a1381950930 100644 --- a/pkg/otlp/internal/serializerexporter/exporter.go +++ b/pkg/otlp/internal/serializerexporter/exporter.go @@ -170,12 +170,15 @@ func newExporter(logger *zap.Logger, s serializer.MetricSerializer, cfg *exporte func (e *exporter) ConsumeMetrics(ctx context.Context, ld pmetric.Metrics) error { consumer := &serializerConsumer{cardinality: e.cardinality, extraTags: e.extraTags} - err := e.tr.MapMetrics(ctx, ld, consumer) + rmt, err := e.tr.MapMetrics(ctx, ld, consumer) if err != nil { return err } consumer.addTelemetryMetric(e.hostname) + if rmt.HasRuntimeMetrics { + consumer.addRuntimeTelemetryMetric(e.hostname, rmt.LanguageTags) + } if err := consumer.Send(e.s); err != nil { return fmt.Errorf("failed to flush metrics: %w", err) } diff --git a/pkg/otlp/internal/serializerexporter/exporter_test.go b/pkg/otlp/internal/serializerexporter/exporter_test.go index bb607942afd7b..1f45370476718 100644 --- a/pkg/otlp/internal/serializerexporter/exporter_test.go +++ b/pkg/otlp/internal/serializerexporter/exporter_test.go @@ -55,8 +55,10 @@ func Test_ConsumeMetrics_Tags(t *testing.T) { defer config.Datadog.Set("hostname", "") const ( - histogramMetricName = "test.histogram" - 
numberMetricName = "test.gauge" + histogramMetricName = "test.histogram" + numberMetricName = "test.gauge" + histogramRuntimeMetricName = "process.runtime.dotnet.exceptions.count" + numberRuntimeMetricName = "process.runtime.go.goroutines" ) tests := []struct { name string @@ -123,6 +125,64 @@ func Test_ConsumeMetrics_Tags(t *testing.T) { nil, ), }, + { + name: "runtime metrics, no tags", + genMetrics: func(t *testing.T) pmetric.Metrics { + h := pmetric.NewHistogramDataPoint() + h.BucketCounts().FromRaw([]uint64{100}) + h.SetCount(100) + h.SetSum(0) + + n := pmetric.NewNumberDataPoint() + n.SetIntValue(777) + return newMetrics(histogramMetricName, h, numberMetricName, n) + }, + setConfig: func(t *testing.T) {}, + wantSketchTags: tagset.NewCompositeTags([]string{}, nil), + wantSerieTags: tagset.NewCompositeTags([]string{}, nil), + }, + { + name: "runtime metrics, metric tags and extra tags", + genMetrics: func(t *testing.T) pmetric.Metrics { + h := pmetric.NewHistogramDataPoint() + h.BucketCounts().FromRaw([]uint64{100}) + h.SetCount(100) + h.SetSum(0) + hAttrs := h.Attributes() + hAttrs.PutStr("histogram_1_id", "value1") + hAttrs.PutStr("histogram_2_id", "value2") + hAttrs.PutStr("histogram_3_id", "value3") + + n := pmetric.NewNumberDataPoint() + n.SetIntValue(777) + nAttrs := n.Attributes() + nAttrs.PutStr("gauge_1_id", "value1") + nAttrs.PutStr("gauge_2_id", "value2") + nAttrs.PutStr("gauge_3_id", "value3") + return newMetrics(histogramRuntimeMetricName, h, numberRuntimeMetricName, n) + }, + setConfig: func(t *testing.T) { + config.SetFeatures(t, config.EKSFargate) + config.Datadog.SetDefault("tags", []string{"serverless_tag1:test1", "serverless_tag2:test2", "serverless_tag3:test3"}) + t.Cleanup(func() { + config.Datadog.SetDefault("tags", []string{}) + }) + }, + wantSketchTags: tagset.NewCompositeTags( + []string{ + "serverless_tag1:test1", "serverless_tag2:test2", "serverless_tag3:test3", + "histogram_1_id:value1", "histogram_2_id:value2", 
"histogram_3_id:value3", + }, + nil, + ), + wantSerieTags: tagset.NewCompositeTags( + []string{ + "serverless_tag1:test1", "serverless_tag2:test2", "serverless_tag3:test3", + "gauge_1_id:value1", "gauge_2_id:value2", "gauge_3_id:value3", + }, + nil, + ), + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -154,6 +214,11 @@ func Test_ConsumeMetrics_Tags(t *testing.T) { if s.Name == "datadog.agent.otlp.metrics" { assert.Equal(t, tagset.NewCompositeTags([]string{}, nil), s.Tags) } + if s.Name == "datadog.agent.otlp.runtime_metrics" { + assert.True(t, s.Tags.Find(func(tag string) bool { + return tag == "language:go" || tag == "language:dotnet" + })) + } if s.Name == numberMetricName { if tt.wantSerieTags.Len() > 0 { assert.Equal(t, tt.wantSerieTags, s.Tags) diff --git a/pkg/otlp/map_provider.go b/pkg/otlp/map_provider.go index bcc7d28fae393..77d5d1a1cb3d6 100644 --- a/pkg/otlp/map_provider.go +++ b/pkg/otlp/map_provider.go @@ -23,34 +23,6 @@ func buildKey(keys ...string) string { return strings.Join(keys, confmap.KeyDelimiter) } -// defaultTracesConfig is the base traces OTLP pipeline configuration. -// This pipeline is extended through the datadog.yaml configuration values. -// It is written in YAML because it is easier to read and write than a map. -const defaultTracesConfig string = ` -receivers: - otlp: - -processors: - batch: - timeout: 10s - -exporters: - otlp: - tls: - insecure: true - compression: none - -service: - telemetry: - metrics: - level: none - pipelines: - traces: - receivers: [otlp] - processors: [batch] - exporters: [otlp] -` - func buildTracesMap(tracePort uint) (*confmap.Conf, error) { baseMap, err := configutils.NewMapFromYAMLString(defaultTracesConfig) if err != nil { @@ -65,29 +37,6 @@ func buildTracesMap(tracePort uint) (*confmap.Conf, error) { return baseMap, err } -// defaultMetricsConfig is the metrics OTLP pipeline configuration. 
-const defaultMetricsConfig string = ` -receivers: - otlp: - -processors: - batch: - timeout: 10s - -exporters: - serializer: - -service: - telemetry: - metrics: - level: none - pipelines: - metrics: - receivers: [otlp] - processors: [batch] - exporters: [serializer] -` - func buildMetricsMap(cfg PipelineConfig) (*confmap.Conf, error) { baseMap, err := configutils.NewMapFromYAMLString(defaultMetricsConfig) if err != nil { diff --git a/pkg/otlp/map_provider_config_not_serverless.go b/pkg/otlp/map_provider_config_not_serverless.go new file mode 100644 index 0000000000000..8c77de11b2949 --- /dev/null +++ b/pkg/otlp/map_provider_config_not_serverless.go @@ -0,0 +1,60 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2021-present Datadog, Inc. + +//go:build otlp && !serverless +// +build otlp,!serverless + +package otlp + +// defaultTracesConfig is the base traces OTLP pipeline configuration. +// This pipeline is extended through the datadog.yaml configuration values. +// It is written in YAML because it is easier to read and write than a map. +const defaultTracesConfig string = ` +receivers: + otlp: + +processors: + batch: + timeout: 10s + +exporters: + otlp: + tls: + insecure: true + compression: none + +service: + telemetry: + metrics: + level: none + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] +` + +// defaultMetricsConfig is the metrics OTLP pipeline configuration. 
+const defaultMetricsConfig string = ` +receivers: + otlp: + +processors: + batch: + timeout: 10s + +exporters: + serializer: + +service: + telemetry: + metrics: + level: none + pipelines: + metrics: + receivers: [otlp] + processors: [batch] + exporters: [serializer] +` diff --git a/pkg/otlp/map_provider_config_serverless.go b/pkg/otlp/map_provider_config_serverless.go new file mode 100644 index 0000000000000..1b372eed3371a --- /dev/null +++ b/pkg/otlp/map_provider_config_serverless.go @@ -0,0 +1,50 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2021-present Datadog, Inc. + +//go:build otlp && serverless +// +build otlp,serverless + +package otlp + +// defaultTracesConfig is the base traces OTLP pipeline configuration. +// This pipeline is extended through the datadog.yaml configuration values. +// It is written in YAML because it is easier to read and write than a map. +const defaultTracesConfig string = ` +receivers: + otlp: + +exporters: + otlp: + tls: + insecure: true + compression: none + +service: + telemetry: + metrics: + level: none + pipelines: + traces: + receivers: [otlp] + exporters: [otlp] +` + +// defaultMetricsConfig is the metrics OTLP pipeline configuration. 
+const defaultMetricsConfig string = ` +receivers: + otlp: + +exporters: + serializer: + +service: + telemetry: + metrics: + level: none + pipelines: + metrics: + receivers: [otlp] + exporters: [serializer] +` diff --git a/pkg/otlp/map_provider_test.go b/pkg/otlp/map_provider_not_serverless_test.go similarity index 99% rename from pkg/otlp/map_provider_test.go rename to pkg/otlp/map_provider_not_serverless_test.go index b85e248cf1156..187274bf0aebd 100644 --- a/pkg/otlp/map_provider_test.go +++ b/pkg/otlp/map_provider_not_serverless_test.go @@ -3,7 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2021-present Datadog, Inc. -//go:build otlp && test +//go:build otlp && !serverless && test +// +build otlp,!serverless,test package otlp diff --git a/pkg/otlp/map_provider_serverless_test.go b/pkg/otlp/map_provider_serverless_test.go new file mode 100644 index 0000000000000..bf904661e42dc --- /dev/null +++ b/pkg/otlp/map_provider_serverless_test.go @@ -0,0 +1,476 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2021-present Datadog, Inc. 
+ +//go:build otlp && serverless && test +// +build otlp,serverless,test + +package otlp + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/confmap" + + "github.com/DataDog/datadog-agent/pkg/otlp/internal/testutil" +) + +func TestNewMap(t *testing.T) { + tests := []struct { + name string + pcfg PipelineConfig + ocfg map[string]interface{} + }{ + { + name: "only gRPC, only Traces", + pcfg: PipelineConfig{ + OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 1234, 0), + TracePort: 5003, + TracesEnabled: true, + Debug: map[string]interface{}{ + "loglevel": "disabled", + }, + }, + ocfg: map[string]interface{}{ + "receivers": map[string]interface{}{ + "otlp": map[string]interface{}{ + "protocols": map[string]interface{}{ + "grpc": map[string]interface{}{ + "endpoint": "bindhost:1234", + }, + }, + }, + }, + "exporters": map[string]interface{}{ + "otlp": map[string]interface{}{ + "tls": map[string]interface{}{ + "insecure": true, + }, + "compression": "none", + "endpoint": "localhost:5003", + }, + }, + "service": map[string]interface{}{ + "telemetry": map[string]interface{}{"metrics": map[string]interface{}{"level": "none"}}, + "pipelines": map[string]interface{}{ + "traces": map[string]interface{}{ + "receivers": []interface{}{"otlp"}, + "exporters": []interface{}{"otlp"}, + }, + }, + }, + }, + }, + { + name: "only HTTP, metrics and traces", + pcfg: PipelineConfig{ + OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), + TracePort: 5003, + TracesEnabled: true, + MetricsEnabled: true, + Metrics: map[string]interface{}{ + "delta_ttl": 2000, + "resource_attributes_as_tags": true, + "instrumentation_library_metadata_as_tags": true, + "instrumentation_scope_metadata_as_tags": true, + "histograms": map[string]interface{}{ + "mode": "counters", + "send_count_sum_metrics": true, + }, + }, + Debug: map[string]interface{}{ + "loglevel": "disabled", + }, + }, + ocfg: 
map[string]interface{}{ + "receivers": map[string]interface{}{ + "otlp": map[string]interface{}{ + "protocols": map[string]interface{}{ + "http": map[string]interface{}{ + "endpoint": "bindhost:1234", + }, + }, + }, + }, + "exporters": map[string]interface{}{ + "otlp": map[string]interface{}{ + "tls": map[string]interface{}{ + "insecure": true, + }, + "compression": "none", + "endpoint": "localhost:5003", + }, + "serializer": map[string]interface{}{ + "metrics": map[string]interface{}{ + "delta_ttl": 2000, + "resource_attributes_as_tags": true, + "instrumentation_library_metadata_as_tags": true, + "instrumentation_scope_metadata_as_tags": true, + "histograms": map[string]interface{}{ + "mode": "counters", + "send_count_sum_metrics": true, + }, + }, + }, + }, + "service": map[string]interface{}{ + "telemetry": map[string]interface{}{"metrics": map[string]interface{}{"level": "none"}}, + "pipelines": map[string]interface{}{ + "traces": map[string]interface{}{ + "receivers": []interface{}{"otlp"}, + "exporters": []interface{}{"otlp"}, + }, + "metrics": map[string]interface{}{ + "receivers": []interface{}{"otlp"}, + "exporters": []interface{}{"serializer"}, + }, + }, + }, + }, + }, + { + name: "only HTTP, metrics and traces, invalid loglevel(ignored)", + pcfg: PipelineConfig{ + OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), + TracePort: 5003, + TracesEnabled: true, + MetricsEnabled: true, + Metrics: map[string]interface{}{ + "delta_ttl": 2000, + "resource_attributes_as_tags": true, + "instrumentation_library_metadata_as_tags": true, + "instrumentation_scope_metadata_as_tags": true, + "histograms": map[string]interface{}{ + "mode": "counters", + "send_count_sum_metrics": true, + }, + }, + Debug: map[string]interface{}{ + "loglevel": "foo", + }, + }, + ocfg: map[string]interface{}{ + "receivers": map[string]interface{}{ + "otlp": map[string]interface{}{ + "protocols": map[string]interface{}{ + "http": map[string]interface{}{ + "endpoint": 
"bindhost:1234", + }, + }, + }, + }, + "exporters": map[string]interface{}{ + "otlp": map[string]interface{}{ + "tls": map[string]interface{}{ + "insecure": true, + }, + "compression": "none", + "endpoint": "localhost:5003", + }, + "serializer": map[string]interface{}{ + "metrics": map[string]interface{}{ + "delta_ttl": 2000, + "resource_attributes_as_tags": true, + "instrumentation_library_metadata_as_tags": true, + "instrumentation_scope_metadata_as_tags": true, + "histograms": map[string]interface{}{ + "mode": "counters", + "send_count_sum_metrics": true, + }, + }, + }, + }, + "service": map[string]interface{}{ + "telemetry": map[string]interface{}{"metrics": map[string]interface{}{"level": "none"}}, + "pipelines": map[string]interface{}{ + "traces": map[string]interface{}{ + "receivers": []interface{}{"otlp"}, + "exporters": []interface{}{"otlp"}, + }, + "metrics": map[string]interface{}{ + "receivers": []interface{}{"otlp"}, + "exporters": []interface{}{"serializer"}, + }, + }, + }, + }, + }, + { + name: "with both", + pcfg: PipelineConfig{ + OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 1234, 5678), + TracePort: 5003, + TracesEnabled: true, + Debug: map[string]interface{}{ + "loglevel": "disabled", + }, + }, + ocfg: map[string]interface{}{ + "receivers": map[string]interface{}{ + "otlp": map[string]interface{}{ + "protocols": map[string]interface{}{ + "grpc": map[string]interface{}{ + "endpoint": "bindhost:1234", + }, + "http": map[string]interface{}{ + "endpoint": "bindhost:5678", + }, + }, + }, + }, + "exporters": map[string]interface{}{ + "otlp": map[string]interface{}{ + "tls": map[string]interface{}{ + "insecure": true, + }, + "compression": "none", + "endpoint": "localhost:5003", + }, + }, + "service": map[string]interface{}{ + "telemetry": map[string]interface{}{"metrics": map[string]interface{}{"level": "none"}}, + "pipelines": map[string]interface{}{ + "traces": map[string]interface{}{ + "receivers": []interface{}{"otlp"}, + 
"exporters": []interface{}{"otlp"}, + }, + }, + }, + }, + }, + { + name: "only HTTP, only metrics", + pcfg: PipelineConfig{ + OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), + TracePort: 5003, + MetricsEnabled: true, + Metrics: map[string]interface{}{ + "delta_ttl": 1500, + "resource_attributes_as_tags": false, + "instrumentation_library_metadata_as_tags": false, + "instrumentation_scope_metadata_as_tags": false, + "histograms": map[string]interface{}{ + "mode": "nobuckets", + "send_count_sum_metrics": true, + }, + }, + Debug: map[string]interface{}{ + "loglevel": "disabled", + }, + }, + ocfg: map[string]interface{}{ + "receivers": map[string]interface{}{ + "otlp": map[string]interface{}{ + "protocols": map[string]interface{}{ + "http": map[string]interface{}{ + "endpoint": "bindhost:1234", + }, + }, + }, + }, + "exporters": map[string]interface{}{ + "serializer": map[string]interface{}{ + "metrics": map[string]interface{}{ + "delta_ttl": 1500, + "resource_attributes_as_tags": false, + "instrumentation_library_metadata_as_tags": false, + "instrumentation_scope_metadata_as_tags": false, + "histograms": map[string]interface{}{ + "mode": "nobuckets", + "send_count_sum_metrics": true, + }, + }, + }, + }, + "service": map[string]interface{}{ + "telemetry": map[string]interface{}{"metrics": map[string]interface{}{"level": "none"}}, + "pipelines": map[string]interface{}{ + "metrics": map[string]interface{}{ + "receivers": []interface{}{"otlp"}, + "exporters": []interface{}{"serializer"}, + }, + }, + }, + }, + }, + { + name: "only gRPC, only Traces, logging info", + pcfg: PipelineConfig{ + OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 1234, 0), + TracePort: 5003, + TracesEnabled: true, + Debug: map[string]interface{}{ + "loglevel": "info", + }, + }, + ocfg: map[string]interface{}{ + "receivers": map[string]interface{}{ + "otlp": map[string]interface{}{ + "protocols": map[string]interface{}{ + "grpc": map[string]interface{}{ + 
"endpoint": "bindhost:1234", + }, + }, + }, + }, + "exporters": map[string]interface{}{ + "otlp": map[string]interface{}{ + "tls": map[string]interface{}{ + "insecure": true, + }, + "compression": "none", + "endpoint": "localhost:5003", + }, + "logging": map[string]interface{}{ + "loglevel": "info", + }, + }, + "service": map[string]interface{}{ + "telemetry": map[string]interface{}{"metrics": map[string]interface{}{"level": "none"}}, + "pipelines": map[string]interface{}{ + "traces": map[string]interface{}{ + "receivers": []interface{}{"otlp"}, + "exporters": []interface{}{"otlp", "logging"}, + }, + }, + }, + }, + }, + { + name: "only HTTP, only metrics, logging debug", + pcfg: PipelineConfig{ + OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), + TracePort: 5003, + MetricsEnabled: true, + Metrics: map[string]interface{}{ + "delta_ttl": 1500, + "resource_attributes_as_tags": false, + "instrumentation_library_metadata_as_tags": false, + "histograms": map[string]interface{}{ + "mode": "nobuckets", + "send_count_sum_metrics": true, + }, + }, + Debug: map[string]interface{}{ + "loglevel": "debug", + }, + }, + ocfg: map[string]interface{}{ + "receivers": map[string]interface{}{ + "otlp": map[string]interface{}{ + "protocols": map[string]interface{}{ + "http": map[string]interface{}{ + "endpoint": "bindhost:1234", + }, + }, + }, + }, + "exporters": map[string]interface{}{ + "serializer": map[string]interface{}{ + "metrics": map[string]interface{}{ + "delta_ttl": 1500, + "resource_attributes_as_tags": false, + "instrumentation_library_metadata_as_tags": false, + "histograms": map[string]interface{}{ + "mode": "nobuckets", + "send_count_sum_metrics": true, + }, + }, + }, + "logging": map[string]interface{}{ + "loglevel": "debug", + }, + }, + "service": map[string]interface{}{ + "telemetry": map[string]interface{}{"metrics": map[string]interface{}{"level": "none"}}, + "pipelines": map[string]interface{}{ + "metrics": map[string]interface{}{ + 
"receivers": []interface{}{"otlp"}, + "exporters": []interface{}{"serializer", "logging"}, + }, + }, + }, + }, + }, + { + name: "only HTTP, metrics and traces, logging warn", + pcfg: PipelineConfig{ + OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), + TracePort: 5003, + TracesEnabled: true, + MetricsEnabled: true, + Metrics: map[string]interface{}{ + "delta_ttl": 2000, + "resource_attributes_as_tags": true, + "instrumentation_library_metadata_as_tags": true, + "histograms": map[string]interface{}{ + "mode": "counters", + "send_count_sum_metrics": true, + }, + }, + Debug: map[string]interface{}{ + "loglevel": "warn", + }, + }, + ocfg: map[string]interface{}{ + "receivers": map[string]interface{}{ + "otlp": map[string]interface{}{ + "protocols": map[string]interface{}{ + "http": map[string]interface{}{ + "endpoint": "bindhost:1234", + }, + }, + }, + }, + "exporters": map[string]interface{}{ + "otlp": map[string]interface{}{ + "tls": map[string]interface{}{ + "insecure": true, + }, + "compression": "none", + "endpoint": "localhost:5003", + }, + "serializer": map[string]interface{}{ + "metrics": map[string]interface{}{ + "delta_ttl": 2000, + "resource_attributes_as_tags": true, + "instrumentation_library_metadata_as_tags": true, + "histograms": map[string]interface{}{ + "mode": "counters", + "send_count_sum_metrics": true, + }, + }, + }, + "logging": map[string]interface{}{ + "loglevel": "warn", + }, + }, + "service": map[string]interface{}{ + "telemetry": map[string]interface{}{"metrics": map[string]interface{}{"level": "none"}}, + "pipelines": map[string]interface{}{ + "traces": map[string]interface{}{ + "receivers": []interface{}{"otlp"}, + "exporters": []interface{}{"otlp", "logging"}, + }, + "metrics": map[string]interface{}{ + "receivers": []interface{}{"otlp"}, + "exporters": []interface{}{"serializer", "logging"}, + }, + }, + }, + }, + }, + } + + for _, testInstance := range tests { + t.Run(testInstance.name, func(t *testing.T) { + cfg, 
err := buildMap(testInstance.pcfg) + require.NoError(t, err) + tcfg := confmap.NewFromStringMap(testInstance.ocfg) + assert.Equal(t, tcfg.ToStringMap(), cfg.ToStringMap()) + }) + } +} diff --git a/pkg/process/checks/enabled_checks_test.go b/pkg/process/checks/enabled_checks_test.go index eef595724d282..52e0f5c86c55f 100644 --- a/pkg/process/checks/enabled_checks_test.go +++ b/pkg/process/checks/enabled_checks_test.go @@ -6,6 +6,7 @@ package checks import ( + "runtime" "testing" "github.com/stretchr/testify/assert" @@ -89,7 +90,11 @@ func TestConnectionsCheck(t *testing.T) { scfg.Set("system_probe_config.enabled", true) enabledChecks := getEnabledChecks(t, cfg, scfg) - assertContainsCheck(t, enabledChecks, ConnectionsCheckName) + if runtime.GOOS == "darwin" { + assertNotContainsCheck(t, enabledChecks, ConnectionsCheckName) + } else { + assertContainsCheck(t, enabledChecks, ConnectionsCheckName) + } }) t.Run("disabled", func(t *testing.T) { diff --git a/pkg/process/checks/system_info_windows.go b/pkg/process/checks/system_info_windows.go index 2b1e3ee32539e..03f3f9ad67a3d 100644 --- a/pkg/process/checks/system_info_windows.go +++ b/pkg/process/checks/system_info_windows.go @@ -9,8 +9,8 @@ import ( "fmt" "strconv" - "github.com/DataDog/gohai/cpu" - "github.com/DataDog/gohai/platform" + "github.com/DataDog/datadog-agent/pkg/gohai/cpu" + "github.com/DataDog/datadog-agent/pkg/gohai/platform" "github.com/DataDog/datadog-agent/pkg/util/winutil" diff --git a/pkg/process/runner/collector_api_test.go b/pkg/process/runner/collector_api_test.go index 865e5a669ce20..84f19d92f8fd2 100644 --- a/pkg/process/runner/collector_api_test.go +++ b/pkg/process/runner/collector_api_test.go @@ -19,6 +19,7 @@ import ( ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/process/checks" + "github.com/DataDog/datadog-agent/pkg/process/runner/endpoint" apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" 
"github.com/DataDog/datadog-agent/pkg/process/util/api/headers" "github.com/DataDog/datadog-agent/pkg/version" @@ -87,7 +88,7 @@ func TestSendConnectionsMessage(t *testing.T) { assert.Equal(t, "/api/v1/connections", req.uri) assert.Equal(t, testHostName, req.headers.Get(headers.HostHeader)) - apiEps, err := GetAPIEndpoints(cfg) + apiEps, err := endpoint.GetAPIEndpoints(cfg) assert.NoError(t, err) assert.Equal(t, apiEps[0].APIKey, req.headers.Get("DD-Api-Key")) @@ -127,7 +128,7 @@ func TestSendContainerMessage(t *testing.T) { assert.Equal(t, "/api/v1/container", req.uri) assert.Equal(t, testHostName, req.headers.Get(headers.HostHeader)) - eps, err := GetAPIEndpoints(cfg) + eps, err := endpoint.GetAPIEndpoints(cfg) assert.NoError(t, err) assert.Equal(t, eps[0].APIKey, req.headers.Get("DD-Api-Key")) assert.Equal(t, "1", req.headers.Get(headers.ContainerCountHeader)) @@ -165,7 +166,7 @@ func TestSendProcMessage(t *testing.T) { assert.Equal(t, "/api/v1/collector", req.uri) assert.Equal(t, testHostName, req.headers.Get(headers.HostHeader)) - eps, err := GetAPIEndpoints(cfg) + eps, err := endpoint.GetAPIEndpoints(cfg) assert.NoError(t, err) assert.Equal(t, eps[0].APIKey, req.headers.Get("DD-Api-Key")) assert.Equal(t, "1", req.headers.Get(headers.ContainerCountHeader)) @@ -206,7 +207,7 @@ func TestSendProcessDiscoveryMessage(t *testing.T) { assert.Equal(t, "/api/v1/discovery", req.uri) assert.Equal(t, testHostName, req.headers.Get(headers.HostHeader)) - eps, err := GetAPIEndpoints(cfg) + eps, err := endpoint.GetAPIEndpoints(cfg) assert.NoError(t, err) assert.Equal(t, eps[0].APIKey, req.headers.Get("DD-Api-Key")) assert.Equal(t, "0", req.headers.Get(headers.ContainerCountHeader)) @@ -256,7 +257,7 @@ func TestSendProcessEventMessage(t *testing.T) { assert.Equal(t, "/api/v2/proclcycle", req.uri) assert.Equal(t, testHostName, req.headers.Get(headers.HostHeader)) - eps, err := getEventsAPIEndpoints(cfg) + eps, err := endpoint.GetEventsAPIEndpoints(cfg) assert.NoError(t, err) 
assert.Equal(t, eps[0].APIKey, req.headers.Get("DD-Api-Key")) assert.Equal(t, "0", req.headers.Get(headers.ContainerCountHeader)) @@ -304,7 +305,7 @@ func TestSendProcMessageWithRetry(t *testing.T) { timestamps := make(map[string]struct{}) for _, req := range requests { assert.Equal(t, testHostName, req.headers.Get(headers.HostHeader)) - eps, err := GetAPIEndpoints(cfg) + eps, err := endpoint.GetAPIEndpoints(cfg) assert.NoError(t, err) assert.Equal(t, eps[0].APIKey, req.headers.Get("DD-Api-Key")) assert.Equal(t, "1", req.headers.Get(headers.ContainerCountHeader)) @@ -571,7 +572,8 @@ func runCollectorTestWithAPIKeys(t *testing.T, check checks.Check, epConfig *end c, err := NewRunnerWithChecks(mockConfig, []checks.Check{check}, true, nil) check.Init(nil, hostInfo) assert.NoError(t, err) - c.Submitter, err = NewSubmitter(mockConfig, hostInfo.HostName) + forwarders := newForwardersMock(t, mockConfig) + c.Submitter, err = NewSubmitter(mockConfig, forwarders, hostInfo.HostName) require.NoError(t, err) err = c.Submitter.Start() diff --git a/pkg/process/runner/endpoints.go b/pkg/process/runner/endpoint/endpoints.go similarity index 95% rename from pkg/process/runner/endpoints.go rename to pkg/process/runner/endpoint/endpoints.go index 3132ab333ca16..275b9cace3794 100644 --- a/pkg/process/runner/endpoints.go +++ b/pkg/process/runner/endpoint/endpoints.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package runner +package endpoint import ( "fmt" @@ -18,7 +18,7 @@ func GetAPIEndpoints(config ddconfig.ConfigReader) (eps []apicfg.Endpoint, err e return getAPIEndpointsWithKeys(config, "https://process.", "process_config.process_dd_url", "process_config.additional_endpoints") } -func getEventsAPIEndpoints(config ddconfig.ConfigReader) (eps []apicfg.Endpoint, err error) { +func GetEventsAPIEndpoints(config ddconfig.ConfigReader) (eps []apicfg.Endpoint, err error) { return getAPIEndpointsWithKeys(config, "https://process-events.", "process_config.events_dd_url", "process_config.events_additional_endpoints") } diff --git a/pkg/process/runner/endpoints_test.go b/pkg/process/runner/endpoints_test.go index e3614eaa49bb0..efae55c682070 100644 --- a/pkg/process/runner/endpoints_test.go +++ b/pkg/process/runner/endpoints_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/process/runner/endpoint" apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" ) @@ -93,7 +94,7 @@ func TestGetAPIEndpoints(t *testing.T) { cfg.Set("process_config.additional_endpoints", tc.additionalEndpoints) } - if eps, err := GetAPIEndpoints(cfg); tc.error { + if eps, err := endpoint.GetAPIEndpoints(cfg); tc.error { assert.Error(t, err) } else { assert.NoError(t, err) @@ -150,13 +151,13 @@ func TestGetAPIEndpointsSite(t *testing.T) { cfg.Set("process_config.events_dd_url", tc.eventsDDURL) } - eps, err := GetAPIEndpoints(cfg) + eps, err := endpoint.GetAPIEndpoints(cfg) assert.NoError(t, err) mainEndpoint := eps[0] assert.Equal(t, tc.expectedHostname, mainEndpoint.Endpoint.Hostname()) - eventsEps, err := getEventsAPIEndpoints(cfg) + eventsEps, err := endpoint.GetEventsAPIEndpoints(cfg) assert.NoError(t, err) mainEventEndpoint := eventsEps[0] @@ -298,11 +299,11 @@ func TestGetConcurrentAPIEndpoints(t *testing.T) { cfg.Set("process_config.events_additional_endpoints", 
tc.additionalEventsEndpoints) } - eps, err := GetAPIEndpoints(cfg) + eps, err := endpoint.GetAPIEndpoints(cfg) assert.NoError(t, err) assert.ElementsMatch(t, tc.expectedEndpoints, eps) - eventsEps, err := getEventsAPIEndpoints(cfg) + eventsEps, err := endpoint.GetEventsAPIEndpoints(cfg) assert.NoError(t, err) assert.ElementsMatch(t, tc.expectedEventsEndpoints, eventsEps) }) diff --git a/pkg/process/runner/submitter.go b/pkg/process/runner/submitter.go index 41d7996d08383..9ecea1be5708c 100644 --- a/pkg/process/runner/submitter.go +++ b/pkg/process/runner/submitter.go @@ -16,14 +16,18 @@ import ( model "github.com/DataDog/agent-payload/v5/process" "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" forwarder "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/transaction" + "github.com/DataDog/datadog-agent/comp/process/forwarders" "github.com/DataDog/datadog-agent/comp/process/types" + ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/resolver" "github.com/DataDog/datadog-agent/pkg/orchestrator" oconfig "github.com/DataDog/datadog-agent/pkg/orchestrator/config" "github.com/DataDog/datadog-agent/pkg/process/checks" + "github.com/DataDog/datadog-agent/pkg/process/runner/endpoint" "github.com/DataDog/datadog-agent/pkg/process/statsd" "github.com/DataDog/datadog-agent/pkg/process/status" "github.com/DataDog/datadog-agent/pkg/process/util/api" @@ -51,11 +55,11 @@ type CheckSubmitter struct { podResults *api.WeightedQueue // Forwarders - processForwarder *forwarder.DefaultForwarder - rtProcessForwarder *forwarder.DefaultForwarder - connectionsForwarder *forwarder.DefaultForwarder + processForwarder defaultforwarder.Component + rtProcessForwarder defaultforwarder.Component + connectionsForwarder defaultforwarder.Component podForwarder *forwarder.DefaultForwarder - 
eventForwarder *forwarder.DefaultForwarder + eventForwarder defaultforwarder.Component orchestrator *oconfig.OrchestratorConfig hostname string @@ -75,7 +79,7 @@ type CheckSubmitter struct { rtNotifierChan chan types.RTResponse } -func NewSubmitter(config config.Component, hostname string) (*CheckSubmitter, error) { +func NewSubmitter(config config.Component, forwarders forwarders.Component, hostname string) (*CheckSubmitter, error) { queueBytes := config.GetInt("process_config.process_queue_bytes") if queueBytes <= 0 { log.Warnf("Invalid queue bytes size: %d. Using default value: %d", queueBytes, ddconfig.DefaultProcessQueueBytes) @@ -119,34 +123,20 @@ func NewSubmitter(config config.Component, hostname string) (*CheckSubmitter, er status.UpdateDropCheckPayloads(dropCheckPayloads) // Forwarder initialization - processAPIEndpoints, err := GetAPIEndpoints(config) + processAPIEndpoints, err := endpoint.GetAPIEndpoints(config) if err != nil { return nil, err } - processForwarderOpts := forwarder.NewOptionsWithResolvers(config, resolver.NewSingleDomainResolvers(apicfg.KeysPerDomains(processAPIEndpoints))) - processForwarderOpts.DisableAPIKeyChecking = true - processForwarderOpts.RetryQueuePayloadsTotalMaxSize = queueBytes // Allow more in-flight requests than the default - processForwarder := forwarder.NewDefaultForwarder(config, processForwarderOpts) - - // rt forwarder reuses processForwarder's config - rtProcessForwarder := forwarder.NewDefaultForwarder(config, processForwarderOpts) - - // connections forwarder reuses processForwarder's config - connectionsForwarder := forwarder.NewDefaultForwarder(config, processForwarderOpts) podForwarderOpts := forwarder.NewOptionsWithResolvers(config, resolver.NewSingleDomainResolvers(apicfg.KeysPerDomains(orchestrator.OrchestratorEndpoints))) podForwarderOpts.DisableAPIKeyChecking = true podForwarderOpts.RetryQueuePayloadsTotalMaxSize = queueBytes // Allow more in-flight requests than the default podForwarder := 
forwarder.NewDefaultForwarder(config, podForwarderOpts) - processEventsAPIEndpoints, err := getEventsAPIEndpoints(config) + processEventsAPIEndpoints, err := endpoint.GetEventsAPIEndpoints(config) if err != nil { return nil, err } - eventForwarderOpts := forwarder.NewOptionsWithResolvers(config, resolver.NewSingleDomainResolvers(apicfg.KeysPerDomains(processEventsAPIEndpoints))) - eventForwarderOpts.DisableAPIKeyChecking = true - eventForwarderOpts.RetryQueuePayloadsTotalMaxSize = queueBytes // Allow more in-flight requests than the default - eventForwarder := forwarder.NewDefaultForwarder(config, eventForwarderOpts) printStartMessage(hostname, processAPIEndpoints, processEventsAPIEndpoints, orchestrator.OrchestratorEndpoints) return &CheckSubmitter{ @@ -156,11 +146,11 @@ func NewSubmitter(config config.Component, hostname string) (*CheckSubmitter, er connectionsResults: connectionsResults, podResults: podResults, - processForwarder: processForwarder, - rtProcessForwarder: rtProcessForwarder, - connectionsForwarder: connectionsForwarder, + processForwarder: forwarders.GetProcessForwarder(), + rtProcessForwarder: forwarders.GetRTProcessForwarder(), + connectionsForwarder: forwarders.GetConnectionsForwarder(), podForwarder: podForwarder, - eventForwarder: eventForwarder, + eventForwarder: forwarders.GetEventForwarder(), orchestrator: orchestrator, hostname: hostname, @@ -361,7 +351,6 @@ func (s *CheckSubmitter) consumePayloads(results *api.WeightedQueue, fwd forward updateRTStatus = true responses, err = fwd.SubmitRTContainerChecks(forwarderPayload, payload.headers) case checks.ConnectionsCheckName: - updateRTStatus = true responses, err = fwd.SubmitConnectionChecks(forwarderPayload, payload.headers) // Pod check metadata case checks.PodCheckName: diff --git a/pkg/process/runner/submitter_test.go b/pkg/process/runner/submitter_test.go index befc5f15bd8bd..1565a1c3a5405 100644 --- a/pkg/process/runner/submitter_test.go +++ b/pkg/process/runner/submitter_test.go @@ 
-11,9 +11,12 @@ import ( "time" "github.com/stretchr/testify/assert" + "go.uber.org/fx" model "github.com/DataDog/agent-payload/v5/process" "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/comp/core/log" + "github.com/DataDog/datadog-agent/comp/process/forwarders" ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/process/util/api/headers" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -59,8 +62,8 @@ func TestNewCollectorQueueSize(t *testing.T) { if tc.override { mockConfig.Set("process_config.queue_size", tc.queueSize) } - - c, err := NewSubmitter(mockConfig, testHostName) + forwarders := newForwardersMock(t, mockConfig) + c, err := NewSubmitter(mockConfig, forwarders, testHostName) assert.NoError(t, err) assert.Equal(t, tc.expectedQueueSize, c.processResults.MaxSize()) assert.Equal(t, tc.expectedQueueSize, c.podResults.MaxSize()) @@ -107,8 +110,8 @@ func TestNewCollectorRTQueueSize(t *testing.T) { if tc.override { mockConfig.Set("process_config.rt_queue_size", tc.queueSize) } - - c, err := NewSubmitter(mockConfig, testHostName) + forwarders := newForwardersMock(t, mockConfig) + c, err := NewSubmitter(mockConfig, forwarders, testHostName) assert.NoError(t, err) assert.Equal(t, tc.expectedQueueSize, c.rtProcessResults.MaxSize()) }) @@ -154,8 +157,8 @@ func TestNewCollectorProcessQueueBytes(t *testing.T) { if tc.override { mockConfig.Set("process_config.process_queue_bytes", tc.queueBytes) } - - s, err := NewSubmitter(mockConfig, testHostName) + forwarders := newForwardersMock(t, mockConfig) + s, err := NewSubmitter(mockConfig, forwarders, testHostName) assert.NoError(t, err) assert.Equal(t, int64(tc.expectedQueueSize), s.processResults.MaxWeight()) assert.Equal(t, int64(tc.expectedQueueSize), s.rtProcessResults.MaxWeight()) @@ -165,8 +168,8 @@ func TestNewCollectorProcessQueueBytes(t *testing.T) { } func TestCollectorMessagesToCheckResult(t *testing.T) { - config := 
fxutil.Test[config.Component](t, config.MockModule) - submitter, err := NewSubmitter(config, testHostName) + deps := newSubmitterDeps(t) + submitter, err := NewSubmitter(deps.Config, deps.Forwarders, testHostName) assert.NoError(t, err) now := time.Now() @@ -284,8 +287,8 @@ func TestCollectorMessagesToCheckResult(t *testing.T) { } func Test_getRequestID(t *testing.T) { - config := fxutil.Test[config.Component](t, config.MockModule) - s, err := NewSubmitter(config, testHostName) + deps := newSubmitterDeps(t) + s, err := NewSubmitter(deps.Config, deps.Forwarders, testHostName) assert.NoError(t, err) fixedDate1 := time.Date(2022, 9, 1, 0, 0, 1, 0, time.Local) @@ -312,3 +315,22 @@ func Test_getRequestID(t *testing.T) { id5 := s.getRequestID(fixedDate1, 1) assert.NotEqual(t, id1, id5) } + +type submitterDeps struct { + fx.In + Config config.Component + Forwarders forwarders.Component +} + +func newSubmitterDeps(t *testing.T) submitterDeps { + return fxutil.Test[submitterDeps](t, getForwardersMockModules(nil)) +} + +func newForwardersMock(t *testing.T, config ddconfig.Config) forwarders.Component { + overrides := config.AllSettings() + return fxutil.Test[forwarders.Component](t, getForwardersMockModules(overrides)) +} + +func getForwardersMockModules(configOverrides map[string]interface{}) fx.Option { + return fx.Options(config.MockModule, fx.Replace(config.MockParams{Overrides: configOverrides}), forwarders.MockModule, log.MockModule) +} diff --git a/pkg/process/util/containers.go b/pkg/process/util/containers.go index 23cd9bff308af..733552a71aa23 100644 --- a/pkg/process/util/containers.go +++ b/pkg/process/util/containers.go @@ -207,6 +207,13 @@ func computeContainerStats(hostCPUCount float64, inStats *metrics.ContainerStats outStats.MemCache = uint64(statValue(inStats.Memory.Cache, 0)) outStats.MemRss = uint64(statValue(inStats.Memory.RSS, 0)) outStats.MemUsage = uint64(statValue(inStats.Memory.UsageTotal, 0)) + + // On Linux OOM Killer (memory limit) uses 
~WorkingSet, on Windows it's CommitBytes + if inStats.Memory.WorkingSet != nil { + outStats.MemAccounted = uint64(*inStats.Memory.WorkingSet) + } else if inStats.Memory.CommitBytes != nil { + outStats.MemAccounted = uint64(*inStats.Memory.CommitBytes) + } } if inStats.PID != nil { diff --git a/pkg/process/util/containers_test.go b/pkg/process/util/containers_test.go index 564906bede4b2..d9e7f679436a8 100644 --- a/pkg/process/util/containers_test.go +++ b/pkg/process/util/containers_test.go @@ -168,11 +168,13 @@ func TestGetContainers(t *testing.T) { }, }) - // cID5 garden container full stats + // cID5 garden container full stats (replacing Linux WorkingSet with CommitBytes) cID5Metrics := mock.GetFullSampleContainerEntry() cID5Metrics.ContainerStats.Timestamp = testTime cID5Metrics.NetworkStats.Timestamp = testTime cID5Metrics.ContainerStats.PID.PIDs = []int{6, 7} + cID5Metrics.ContainerStats.Memory.WorkingSet = nil + cID5Metrics.ContainerStats.Memory.CommitBytes = pointer.Ptr(355.0) metricsCollector.SetContainerEntry("cID5", cID5Metrics) metadataProvider.SetEntity(&workloadmeta.Container{ EntityID: workloadmeta.EntityID{ @@ -300,21 +302,22 @@ func TestGetContainers(t *testing.T) { assert.NoError(t, err) assert.Empty(t, compareResults(processContainers, []*process.Container{ { - Type: "containerd", - Id: "cID1", - CpuLimit: 50, - MemoryLimit: 42000, - State: process.ContainerState_running, - Health: process.ContainerHealth_healthy, - Created: testTime.Add(-10 * time.Minute).Unix(), - UserPct: -1, - SystemPct: -1, - TotalPct: -1, - CpuUsageNs: -1, - MemUsage: 42000, - MemRss: 300, - MemCache: 200, - Started: testTime.Unix(), + Type: "containerd", + Id: "cID1", + CpuLimit: 50, + MemoryLimit: 42000, + State: process.ContainerState_running, + Health: process.ContainerHealth_healthy, + Created: testTime.Add(-10 * time.Minute).Unix(), + UserPct: -1, + SystemPct: -1, + TotalPct: -1, + CpuUsageNs: -1, + MemUsage: 42000, + MemRss: 300, + MemAccounted: 350, + MemCache: 200, 
+ Started: testTime.Unix(), Tags: []string{ "low:common", "orch:orch1", @@ -361,21 +364,22 @@ func TestGetContainers(t *testing.T) { }, }, { - Type: "containerd", - Id: "cID4", - CpuLimit: 50, - MemoryLimit: 42000, - State: process.ContainerState_running, - Health: process.ContainerHealth_healthy, - Created: testTime.Add(-10 * time.Minute).Unix(), - UserPct: -1, - SystemPct: -1, - TotalPct: -1, - CpuUsageNs: -1, - MemUsage: 42000, - MemRss: 300, - MemCache: 200, - Started: testTime.Unix(), + Type: "containerd", + Id: "cID4", + CpuLimit: 50, + MemoryLimit: 42000, + State: process.ContainerState_running, + Health: process.ContainerHealth_healthy, + Created: testTime.Add(-10 * time.Minute).Unix(), + UserPct: -1, + SystemPct: -1, + TotalPct: -1, + CpuUsageNs: -1, + MemUsage: 42000, + MemRss: 300, + MemCache: 200, + MemAccounted: 350, + Started: testTime.Unix(), Addresses: []*process.ContainerAddr{ { Ip: "192.168.0.4", @@ -392,21 +396,22 @@ func TestGetContainers(t *testing.T) { ThreadLimit: 20, }, { - Type: "garden", - Id: "cID5", - CpuLimit: 50, - MemoryLimit: 42000, - State: process.ContainerState_running, - Created: testTime.Unix(), - UserPct: -1, - SystemPct: -1, - TotalPct: -1, - CpuUsageNs: -1, - MemUsage: 42000, - MemRss: 300, - MemCache: 200, - Started: testTime.Unix(), - Tags: []string{"from:pcf", "id:container5"}, + Type: "garden", + Id: "cID5", + CpuLimit: 50, + MemoryLimit: 42000, + State: process.ContainerState_running, + Created: testTime.Unix(), + UserPct: -1, + SystemPct: -1, + TotalPct: -1, + CpuUsageNs: -1, + MemUsage: 42000, + MemRss: 300, + MemCache: 200, + MemAccounted: 355, + Started: testTime.Unix(), + Tags: []string{"from:pcf", "id:container5"}, Addresses: []*process.ContainerAddr{ { Ip: "10.0.0.5", @@ -494,27 +499,28 @@ func TestGetContainers(t *testing.T) { assert.NoError(t, err) assert.Empty(t, compareResults(processContainers, []*process.Container{ { - Type: "containerd", - Id: "cID1", - CpuLimit: 50, - MemoryLimit: 42000, - State: 
process.ContainerState_running, - Health: process.ContainerHealth_healthy, - Created: testTime.Add(-10 * time.Minute).Unix(), - UserPct: 60, - SystemPct: 40, - TotalPct: 20, - CpuUsageNs: 199999984, - MemUsage: 43000, - MemRss: 300, - MemCache: 200, - Rbps: 20, - Wbps: 40, - NetRcvdPs: 40, - NetSentPs: 40, - NetRcvdBps: 4, - NetSentBps: 4, - Started: testTime.Unix(), + Type: "containerd", + Id: "cID1", + CpuLimit: 50, + MemoryLimit: 42000, + State: process.ContainerState_running, + Health: process.ContainerHealth_healthy, + Created: testTime.Add(-10 * time.Minute).Unix(), + UserPct: 60, + SystemPct: 40, + TotalPct: 20, + CpuUsageNs: 199999984, + MemUsage: 43000, + MemRss: 300, + MemCache: 200, + MemAccounted: 350, + Rbps: 20, + Wbps: 40, + NetRcvdPs: 40, + NetSentPs: 40, + NetRcvdBps: 4, + NetSentBps: 4, + Started: testTime.Unix(), Tags: []string{ "low:common", "orch:orch1", @@ -561,27 +567,28 @@ func TestGetContainers(t *testing.T) { }, }, { - Type: "containerd", - Id: "cID4", - CpuLimit: 50, - MemoryLimit: 42000, - State: process.ContainerState_running, - Health: process.ContainerHealth_healthy, - Created: testTime.Add(-10 * time.Minute).Unix(), - UserPct: -1, - SystemPct: -1, - TotalPct: -1, - CpuUsageNs: -1, - MemUsage: 42000, - MemRss: 300, - MemCache: 200, - Rbps: 0, - Wbps: 0, - NetRcvdPs: 0, - NetSentPs: 0, - NetRcvdBps: 0, - NetSentBps: 0, - Started: testTime.Unix(), + Type: "containerd", + Id: "cID4", + CpuLimit: 50, + MemoryLimit: 42000, + State: process.ContainerState_running, + Health: process.ContainerHealth_healthy, + Created: testTime.Add(-10 * time.Minute).Unix(), + UserPct: -1, + SystemPct: -1, + TotalPct: -1, + CpuUsageNs: -1, + MemUsage: 42000, + MemRss: 300, + MemCache: 200, + MemAccounted: 350, + Rbps: 0, + Wbps: 0, + NetRcvdPs: 0, + NetSentPs: 0, + NetRcvdBps: 0, + NetSentBps: 0, + Started: testTime.Unix(), Addresses: []*process.ContainerAddr{ { Ip: "192.168.0.4", @@ -598,21 +605,22 @@ func TestGetContainers(t *testing.T) { ThreadLimit: 20, 
}, { - Type: "garden", - Id: "cID5", - CpuLimit: 50, - MemoryLimit: 42000, - State: process.ContainerState_running, - Created: testTime.Unix(), - UserPct: 0, - SystemPct: 0, - TotalPct: 0, - CpuUsageNs: 0, - MemUsage: 42000, - MemRss: 300, - MemCache: 200, - Started: testTime.Unix(), - Tags: []string{"from:pcf", "id:container5"}, + Type: "garden", + Id: "cID5", + CpuLimit: 50, + MemoryLimit: 42000, + State: process.ContainerState_running, + Created: testTime.Unix(), + UserPct: 0, + SystemPct: 0, + TotalPct: 0, + CpuUsageNs: 0, + MemUsage: 42000, + MemRss: 300, + MemCache: 200, + MemAccounted: 355, + Started: testTime.Unix(), + Tags: []string{"from:pcf", "id:container5"}, Addresses: []*process.ContainerAddr{ { Ip: "10.0.0.5", diff --git a/pkg/process/util/util.go b/pkg/process/util/util.go index 5449ee440d562..363f49d14118e 100644 --- a/pkg/process/util/util.go +++ b/pkg/process/util/util.go @@ -18,7 +18,7 @@ import ( ) // ErrNotImplemented is the "not implemented" error given by `gopsutil` when an -// OS doesn't support and API. Unfortunately it's in an internal package so +// OS doesn't support an API. Unfortunately it's in an internal package so // we can't import it so we'll copy it here. 
var ErrNotImplemented = errors.New("not implemented yet") diff --git a/pkg/proto/datadog/remoteconfig/remoteconfig.proto b/pkg/proto/datadog/remoteconfig/remoteconfig.proto index 4e0ea38a98231..b1ec378fca218 100644 --- a/pkg/proto/datadog/remoteconfig/remoteconfig.proto +++ b/pkg/proto/datadog/remoteconfig/remoteconfig.proto @@ -86,6 +86,7 @@ message ClientTracer { string language = 2; string tracer_version = 3; string service = 4; + repeated string extra_services = 8; string env = 5; string app_version = 6; repeated string tags = 7; diff --git a/pkg/proto/empty.go b/pkg/proto/empty.go new file mode 100644 index 0000000000000..477ea03e23ebc --- /dev/null +++ b/pkg/proto/empty.go @@ -0,0 +1,6 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-2020 Datadog, Inc. + +package proto diff --git a/pkg/proto/go.mod b/pkg/proto/go.mod new file mode 100644 index 0000000000000..57ec39d788df3 --- /dev/null +++ b/pkg/proto/go.mod @@ -0,0 +1,22 @@ +module github.com/DataDog/datadog-agent/pkg/proto + +go 1.19 + +retract v0.46.0-devel + +require ( + github.com/golang/mock v1.6.0 + github.com/golang/protobuf v1.5.3 + github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/tinylib/msgp v1.1.8 + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 + google.golang.org/grpc v1.55.0 + google.golang.org/protobuf v1.30.0 +) + +require ( + github.com/philhofer/fwd v1.1.2 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect +) diff --git a/pkg/proto/go.sum b/pkg/proto/go.sum new file mode 100644 index 0000000000000..39254afeb9e8e --- /dev/null +++ b/pkg/proto/go.sum @@ -0,0 +1,128 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp 
v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= +github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= +github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= +google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 
h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/proto/pbgo/api.pb.go b/pkg/proto/pbgo/api.pb.go index 197f152ae1071..a878d4af11817 100644 --- a/pkg/proto/pbgo/api.pb.go +++ b/pkg/proto/pbgo/api.pb.go @@ -1,20 +1,20 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.6.1 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: datadog/api/v1/api.proto package pbgo import ( context "context" - empty "github.com/golang/protobuf/ptypes/empty" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" reflect "reflect" ) @@ -125,7 +125,7 @@ var file_datadog_api_v1_api_proto_goTypes = []interface{}{ (*CaptureTriggerRequest)(nil), // 3: datadog.model.v1.CaptureTriggerRequest (*TaggerState)(nil), // 4: datadog.model.v1.TaggerState (*ClientGetConfigsRequest)(nil), // 5: datadog.config.ClientGetConfigsRequest - (*empty.Empty)(nil), // 6: google.protobuf.Empty + (*emptypb.Empty)(nil), // 6: google.protobuf.Empty (*WorkloadmetaStreamRequest)(nil), // 7: datadog.workloadmeta.WorkloadmetaStreamRequest (*HostnameReply)(nil), // 8: datadog.model.v1.HostnameReply (*StreamTagsResponse)(nil), // 9: datadog.model.v1.StreamTagsResponse @@ -326,7 
+326,7 @@ type AgentSecureClient interface { // TODO: add the curl code here DogstatsdSetTaggerState(ctx context.Context, in *TaggerState, opts ...grpc.CallOption) (*TaggerStateResponse, error) ClientGetConfigs(ctx context.Context, in *ClientGetConfigsRequest, opts ...grpc.CallOption) (*ClientGetConfigsResponse, error) - GetConfigState(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetStateConfigResponse, error) + GetConfigState(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetStateConfigResponse, error) // Subscribes to added, removed, or changed entities in the Workloadmeta and // streams them to clients as events. // Can be called through the HTTP gateway, and events will be streamed as JSON. @@ -427,7 +427,7 @@ func (c *agentSecureClient) ClientGetConfigs(ctx context.Context, in *ClientGetC return out, nil } -func (c *agentSecureClient) GetConfigState(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetStateConfigResponse, error) { +func (c *agentSecureClient) GetConfigState(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetStateConfigResponse, error) { out := new(GetStateConfigResponse) err := c.cc.Invoke(ctx, "/datadog.api.v1.AgentSecure/GetConfigState", in, out, opts...) if err != nil { @@ -523,7 +523,7 @@ type AgentSecureServer interface { // TODO: add the curl code here DogstatsdSetTaggerState(context.Context, *TaggerState) (*TaggerStateResponse, error) ClientGetConfigs(context.Context, *ClientGetConfigsRequest) (*ClientGetConfigsResponse, error) - GetConfigState(context.Context, *empty.Empty) (*GetStateConfigResponse, error) + GetConfigState(context.Context, *emptypb.Empty) (*GetStateConfigResponse, error) // Subscribes to added, removed, or changed entities in the Workloadmeta and // streams them to clients as events. // Can be called through the HTTP gateway, and events will be streamed as JSON. 
@@ -567,7 +567,7 @@ func (*UnimplementedAgentSecureServer) DogstatsdSetTaggerState(context.Context, func (*UnimplementedAgentSecureServer) ClientGetConfigs(context.Context, *ClientGetConfigsRequest) (*ClientGetConfigsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ClientGetConfigs not implemented") } -func (*UnimplementedAgentSecureServer) GetConfigState(context.Context, *empty.Empty) (*GetStateConfigResponse, error) { +func (*UnimplementedAgentSecureServer) GetConfigState(context.Context, *emptypb.Empty) (*GetStateConfigResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetConfigState not implemented") } func (*UnimplementedAgentSecureServer) WorkloadmetaStreamEntities(*WorkloadmetaStreamRequest, AgentSecure_WorkloadmetaStreamEntitiesServer) error { @@ -672,7 +672,7 @@ func _AgentSecure_ClientGetConfigs_Handler(srv interface{}, ctx context.Context, } func _AgentSecure_GetConfigState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) + in := new(emptypb.Empty) if err := dec(in); err != nil { return nil, err } @@ -684,7 +684,7 @@ func _AgentSecure_GetConfigState_Handler(srv interface{}, ctx context.Context, d FullMethod: "/datadog.api.v1.AgentSecure/GetConfigState", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentSecureServer).GetConfigState(ctx, req.(*empty.Empty)) + return srv.(AgentSecureServer).GetConfigState(ctx, req.(*emptypb.Empty)) } return interceptor(ctx, in, info, handler) } diff --git a/pkg/proto/pbgo/api.pb.gw.go b/pkg/proto/pbgo/api.pb.gw.go index 1efdb47b8d8a3..5c2c2dbd071f1 100644 --- a/pkg/proto/pbgo/api.pb.gw.go +++ b/pkg/proto/pbgo/api.pb.gw.go @@ -15,13 +15,13 @@ import ( "github.com/golang/protobuf/descriptor" "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/empty" "github.com/grpc-ecosystem/grpc-gateway/runtime" 
"github.com/grpc-ecosystem/grpc-gateway/utilities" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" ) // Suppress "imported and not used" errors @@ -212,7 +212,7 @@ func local_request_AgentSecure_ClientGetConfigs_0(ctx context.Context, marshaler } func request_AgentSecure_GetConfigState_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq empty.Empty + var protoReq emptypb.Empty var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) @@ -229,7 +229,7 @@ func request_AgentSecure_GetConfigState_0(ctx context.Context, marshaler runtime } func local_request_AgentSecure_GetConfigState_0(ctx context.Context, marshaler runtime.Marshaler, server AgentSecureServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq empty.Empty + var protoReq emptypb.Empty var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) diff --git a/pkg/proto/pbgo/mocks/api_mockgen.pb.go b/pkg/proto/pbgo/mocks/api_mockgen.pb.go index 59116ac7fb7f0..564c633a6e7ed 100644 --- a/pkg/proto/pbgo/mocks/api_mockgen.pb.go +++ b/pkg/proto/pbgo/mocks/api_mockgen.pb.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: /Users/kyle.ames/go/src/github.com/DataDog/datadog-agent/pkg/proto/pbgo/api.pb.go +// Source: /Users/jaime.fullaondo/go/src/github.com/DataDog/datadog-agent/pkg/proto/pbgo/api.pb.go // Package mock_pbgo is a generated GoMock package. 
package mock_pbgo @@ -10,9 +10,9 @@ import ( pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo" gomock "github.com/golang/mock/gomock" - empty "github.com/golang/protobuf/ptypes/empty" grpc "google.golang.org/grpc" metadata "google.golang.org/grpc/metadata" + emptypb "google.golang.org/protobuf/types/known/emptypb" ) // MockAgentClient is a mock of AgentClient interface. @@ -180,7 +180,7 @@ func (mr *MockAgentSecureClientMockRecorder) DogstatsdSetTaggerState(ctx, in int } // GetConfigState mocks base method. -func (m *MockAgentSecureClient) GetConfigState(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*pbgo.GetStateConfigResponse, error) { +func (m *MockAgentSecureClient) GetConfigState(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*pbgo.GetStateConfigResponse, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, in} for _, a := range opts { @@ -574,7 +574,7 @@ func (mr *MockAgentSecureServerMockRecorder) DogstatsdSetTaggerState(arg0, arg1 } // GetConfigState mocks base method. -func (m *MockAgentSecureServer) GetConfigState(arg0 context.Context, arg1 *empty.Empty) (*pbgo.GetStateConfigResponse, error) { +func (m *MockAgentSecureServer) GetConfigState(arg0 context.Context, arg1 *emptypb.Empty) (*pbgo.GetStateConfigResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetConfigState", arg0, arg1) ret0, _ := ret[0].(*pbgo.GetStateConfigResponse) diff --git a/pkg/proto/pbgo/model.pb.go b/pkg/proto/pbgo/model.pb.go index 5144cfd7b4540..0cfc31560ba1d 100644 --- a/pkg/proto/pbgo/model.pb.go +++ b/pkg/proto/pbgo/model.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.6.1 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: datadog/model/v1/model.proto package pbgo diff --git a/pkg/proto/pbgo/process.pb.go b/pkg/proto/pbgo/process.pb.go index f7810ea35b8d3..941f9e90e6616 100644 --- a/pkg/proto/pbgo/process.pb.go +++ b/pkg/proto/pbgo/process.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.6.1 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: datadog/process/process.proto package pbgo diff --git a/pkg/proto/pbgo/remoteconfig.pb.go b/pkg/proto/pbgo/remoteconfig.pb.go index 258e0527785a9..5c44288b50f82 100644 --- a/pkg/proto/pbgo/remoteconfig.pb.go +++ b/pkg/proto/pbgo/remoteconfig.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.8 // source: datadog/remoteconfig/remoteconfig.proto @@ -717,6 +717,7 @@ type ClientTracer struct { Language string `protobuf:"bytes,2,opt,name=language,proto3" json:"language,omitempty"` TracerVersion string `protobuf:"bytes,3,opt,name=tracer_version,json=tracerVersion,proto3" json:"tracer_version,omitempty"` Service string `protobuf:"bytes,4,opt,name=service,proto3" json:"service,omitempty"` + ExtraServices []string `protobuf:"bytes,8,rep,name=extra_services,json=extraServices,proto3" json:"extra_services,omitempty"` Env string `protobuf:"bytes,5,opt,name=env,proto3" json:"env,omitempty"` AppVersion string `protobuf:"bytes,6,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` Tags []string `protobuf:"bytes,7,rep,name=tags,proto3" json:"tags,omitempty"` @@ -782,6 +783,13 @@ func (x *ClientTracer) GetService() string { return "" } +func (x *ClientTracer) GetExtraServices() []string { + if x != nil { + return x.ExtraServices + } + return nil +} + func (x *ClientTracer) GetEnv() string { if x != nil { return x.Env @@ -1686,7 +1694,7 @@ var 
file_datadog_remoteconfig_remoteconfig_proto_rawDesc = []byte{ 0x6c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd1, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xf8, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x61, 0x6e, 0x67, @@ -1695,147 +1703,149 @@ var file_datadog_remoteconfig_remoteconfig_proto_rawDesc = []byte{ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, - 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, - 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x22, 0xa2, 0x01, 0x0a, - 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 
- 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, - 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, - 0x63, 0x77, 0x73, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x77, 0x73, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, - 0x73, 0x22, 0x93, 0x01, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, - 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x70, - 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, - 0x6f, 0x64, 0x75, 0x63, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x6c, - 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x5f, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, - 0x6c, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x80, 0x02, 0x0a, 0x0b, 0x43, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x6f, 0x6f, 0x74, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x72, - 0x6f, 0x6f, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x73, 0x5f, 
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x61, 0x74, - 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x61, 0x73, 0x5f, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x68, 0x61, 0x73, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x14, 0x62, 0x61, 0x63, 0x6b, - 0x65, 0x6e, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0x48, 0x0a, 0x0e, 0x54, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1c, 0x0a, 0x09, - 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, - 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x4a, 0x04, - 0x08, 0x02, 0x10, 0x03, 0x22, 0x74, 0x0a, 0x0e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, - 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 
0x6c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x12, 0x36, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, - 0x73, 0x68, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x17, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x4e, 0x0a, 0x13, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, - 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x4d, - 0x65, 0x74, 0x61, 0x52, 0x11, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0c, 0x52, 0x05, 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x66, 0x69, - 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x61, 0x74, 0x61, - 0x64, 0x6f, 
0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, - 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x73, 0x22, 0x3d, 0x0a, 0x0d, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, - 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, - 0x73, 0x68, 0x22, 0x81, 0x05, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, - 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x60, 0x0a, 0x0e, 0x64, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x39, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 
0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x66, 0x0a, 0x10, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, 0x61, - 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x52, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x73, 0x1a, 0x5d, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, - 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x1a, 0x5f, 0x0a, 0x12, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x05, 0x76, 
0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, - 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, - 0x74, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xeb, 0x01, 0x0a, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, - 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x31, 0x12, 0x1a, 0x0a, 0x08, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, - 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x61, - 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x61, - 0x6e, 0x67, 0x75, 0x61, 
0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, - 0x65, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69, - 0x6d, 0x65, 0x49, 0x44, 0x22, 0x67, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, - 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x14, 0x74, 0x72, 0x61, 0x63, - 0x65, 0x72, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x31, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, - 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x31, 0x52, 0x12, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x56, 0x31, 0x42, 0x10, 0x5a, - 0x0e, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, + 0x78, 0x74, 0x72, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x76, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x1f, + 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x67, + 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x77, 0x73, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, + 0x6f, 0x61, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x77, 0x73, 0x57, + 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x22, 0x93, 0x01, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x12, 0x1f, 0x0a, 0x0b, + 0x61, 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x0a, + 0x0b, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x80, + 0x02, 0x0a, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x72, 0x6f, 0x6f, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 
0x65, 0x74, 0x73, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0d, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0c, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x68, 0x61, 0x73, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x68, 0x61, 0x73, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, + 0x30, 0x0a, 0x14, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x62, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x22, 0x48, 0x0a, 0x0e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x48, + 0x61, 0x73, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, + 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x68, 0x61, 0x73, 0x68, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x74, 0x0a, 0x0e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x12, 0x0a, + 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 
0x28, + 0x03, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x36, 0x0a, 0x06, 0x68, 0x61, 0x73, + 0x68, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, + 0x73, 0x22, 0x99, 0x01, 0x0a, 0x17, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, + 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x4e, 0x0a, + 0x13, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x11, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0xaa, 0x01, + 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x6f, + 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x72, 0x6f, 0x6f, 0x74, 0x73, + 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x64, 0x61, 
0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, + 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x22, 0x3d, 0x0a, 0x0d, 0x46, 0x69, + 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x22, 0x81, 0x05, 0x0a, 0x16, 0x47, 0x65, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x60, 0x0a, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, + 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 
0x61, 0x74, 0x65, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0d, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x66, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x61, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x0d, 0x61, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x5d, 0x0a, 0x10, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x33, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5f, 0x0a, 0x12, 0x44, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 
0x33, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xeb, 0x01, + 0x0a, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x56, 0x31, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x65, 0x6e, 0x76, + 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x61, + 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x61, 
0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x22, 0x67, 0x0a, 0x10, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, + 0x53, 0x0a, 0x14, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x31, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x31, + 0x52, 0x12, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x73, 0x56, 0x31, 0x42, 0x10, 0x5a, 0x0e, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/proto/pbgo/workloadmeta.pb.go b/pkg/proto/pbgo/workloadmeta.pb.go index cdc770f885a0e..cde3a56ff7937 100644 --- a/pkg/proto/pbgo/workloadmeta.pb.go +++ b/pkg/proto/pbgo/workloadmeta.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.6.1 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: datadog/workloadmeta/workloadmeta.proto package pbgo diff --git a/pkg/proto/utils/copier.go b/pkg/proto/utils/copier.go new file mode 100644 index 0000000000000..41717eddd22fa --- /dev/null +++ b/pkg/proto/utils/copier.go @@ -0,0 +1,51 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-2020 Datadog, Inc. 
+ +package utils + +import ( + "fmt" + "reflect" +) + +// ProtoCopier returns a function that will shallow copy values of a given protobuf value's type, utilising any `Get` +// prefixed method, accepting no input parameters, and returning a single value of the same type, available for each +// given field, intended to be be used with generated code for protobuf messages +// NOTE a panic will occur if the v's type is not t +func ProtoCopier(v interface{}) func(v interface{}) interface{} { + var ( + t = reflect.TypeOf(v) + fieldMethods = make([][2]int, 0) + ) + for i := 0; i < t.Elem().NumField(); i++ { + field := t.Elem().Field(i) + if field.PkgPath != `` { + continue + } + method, ok := t.MethodByName(`Get` + field.Name) + if !ok || + method.Type.NumIn() != 1 || + method.Type.NumOut() != 1 || + method.Type.Out(0) != field.Type { + continue + } + fieldMethods = append(fieldMethods, [2]int{i, method.Index}) + } + return func(v interface{}) interface{} { + src := reflect.ValueOf(v) + protoCopierCheckType(t, src.Type()) + dst := reflect.New(t.Elem()).Elem() + for _, fieldMethod := range fieldMethods { + dst.Field(fieldMethod[0]).Set(src.Method(fieldMethod[1]).Call(nil)[0]) + } + return dst.Addr().Interface() + } +} + +func protoCopierCheckType(dst, src reflect.Type) { + if dst != src { + panic(fmt.Errorf(`ProtoCopier dst %s != src %s`, dst, src)) + } +} diff --git a/pkg/sbom/sbom.go b/pkg/sbom/sbom.go index ec913648c822f..5c1194cebef2a 100644 --- a/pkg/sbom/sbom.go +++ b/pkg/sbom/sbom.go @@ -35,15 +35,15 @@ type ScanOptions struct { // ScanOptionsFromConfig loads the scanning options from the configuration func ScanOptionsFromConfig(cfg config.Config, containers bool) (scanOpts ScanOptions) { if containers { - scanOpts.CheckDiskUsage = config.Datadog.GetBool("container_image_collection.sbom.check_disk_usage") - scanOpts.MinAvailableDisk = uint64(config.Datadog.GetSizeInBytes("container_image_collection.sbom.min_available_disk")) - scanOpts.Timeout = 
time.Duration(config.Datadog.GetInt("container_image_collection.sbom.scan_timeout")) * time.Second - scanOpts.WaitAfter = time.Duration(config.Datadog.GetInt("container_image_collection.sbom.scan_interval")) * time.Second - scanOpts.Analyzers = config.Datadog.GetStringSlice("container_image_collection.sbom.analyzers") + scanOpts.CheckDiskUsage = config.Datadog.GetBool("sbom.container_image.check_disk_usage") + scanOpts.MinAvailableDisk = uint64(config.Datadog.GetSizeInBytes("sbom.container_image.min_available_disk")) + scanOpts.Timeout = time.Duration(config.Datadog.GetInt("sbom.container_image.scan_timeout")) * time.Second + scanOpts.WaitAfter = time.Duration(config.Datadog.GetInt("sbom.container_image.scan_interval")) * time.Second + scanOpts.Analyzers = config.Datadog.GetStringSlice("sbom.container_image.analyzers") } if len(scanOpts.Analyzers) == 0 { - scanOpts.Analyzers = config.Datadog.GetStringSlice("sbom.analyzers") + scanOpts.Analyzers = config.Datadog.GetStringSlice("sbom.host.analyzers") } return diff --git a/pkg/sbom/scanner/scanner.go b/pkg/sbom/scanner/scanner.go index e1157435fe097..470d52c4fffed 100644 --- a/pkg/sbom/scanner/scanner.go +++ b/pkg/sbom/scanner/scanner.go @@ -79,7 +79,7 @@ func (s *Scanner) start(ctx context.Context) { return } go func() { - cleanTicker := time.NewTicker(config.Datadog.GetDuration("sbom.cache_clean_interval")) + cleanTicker := time.NewTicker(config.Datadog.GetDuration("sbom.cache.clean_interval")) defer cleanTicker.Stop() s.running = true defer func() { s.running = false }() @@ -134,7 +134,7 @@ func (s *Scanner) start(ctx context.Context) { scanResult.Duration = generationDuration cancel() - telemetry.SBOMGenerationDuration.Observe(generationDuration.Seconds()) + telemetry.SBOMGenerationDuration.Observe(generationDuration.Seconds(), request.Collector(), request.Type()) sendResult(scanResult) if request.opts.WaitAfter != 0 { t := time.NewTimer(request.opts.WaitAfter) @@ -168,7 +168,7 @@ func NewScanner(cfg 
config.Config) *Scanner { // global one, and returns it. Start() needs to be called before any data // collection happens. func CreateGlobalScanner(cfg config.Config) (*Scanner, error) { - if !cfg.GetBool("sbom.enabled") && !cfg.GetBool("container_image_collection.sbom.enabled") && !cfg.GetBool("runtime_security_config.sbom.enabled") { + if !cfg.GetBool("sbom.host.enabled") && !cfg.GetBool("sbom.container_image.enabled") && !cfg.GetBool("runtime_security_config.sbom.enabled") { return nil, nil } diff --git a/pkg/sbom/telemetry/telemetry.go b/pkg/sbom/telemetry/telemetry.go index 3108c076ec32b..972394827c60f 100644 --- a/pkg/sbom/telemetry/telemetry.go +++ b/pkg/sbom/telemetry/telemetry.go @@ -38,7 +38,7 @@ var ( SBOMGenerationDuration = telemetry.NewHistogramWithOpts( subsystem, "generation_duration", - []string{}, + []string{"source", "type"}, "SBOM generation duration (in seconds)", []float64{10, 30, 60, 120, 180, 240, 300, 360, 420, 480, 540, 600}, commonOpts, diff --git a/pkg/security/agent/client.go b/pkg/security/agent/client.go index 4ba71de902f9a..a4dde81ebf13b 100644 --- a/pkg/security/agent/client.go +++ b/pkg/security/agent/client.go @@ -123,6 +123,23 @@ func (c *RuntimeSecurityClient) GetActivityDumpStream() (api.SecurityModule_GetA return stream, nil } +// ListSecurityProfiles lists the profiles held in memory by the Security Profile manager +func (c *RuntimeSecurityClient) ListSecurityProfiles(includeCache bool) (*api.SecurityProfileListMessage, error) { + return c.apiClient.ListSecurityProfiles(context.Background(), &api.SecurityProfileListParams{ + IncludeCache: includeCache, + }) +} + +// SaveSecurityProfile saves the requested security profile to disk +func (c *RuntimeSecurityClient) SaveSecurityProfile(name string, tag string) (*api.SecurityProfileSaveMessage, error) { + return c.apiClient.SaveSecurityProfile(context.Background(), &api.SecurityProfileSaveParams{ + Selector: &api.WorkloadSelectorMessage{ + Name: name, + Tag: tag, + }, + }) +} + // 
Close closes the connection func (c *RuntimeSecurityClient) Close() { c.conn.Close() diff --git a/pkg/security/common/account_id.go b/pkg/security/common/account_id.go new file mode 100644 index 0000000000000..87aefe4aa1b88 --- /dev/null +++ b/pkg/security/common/account_id.go @@ -0,0 +1,71 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package common + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/azure" + "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/gce" + "github.com/DataDog/datadog-agent/pkg/util/ec2" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +type cloudProviderDetector struct { + name string + accountIdName string + callback func(context.Context) bool + accountIdCallback func(context.Context) (string, error) +} + +func queryAccountId(ctx context.Context) (string, string, error) { + detectors := []cloudProviderDetector{ + {name: ec2.CloudProviderName, accountIdName: "account_id", callback: ec2.IsRunningOn, accountIdCallback: ec2.GetAccountID}, + {name: gce.CloudProviderName, accountIdName: "project_id", callback: gce.IsRunningOn, accountIdCallback: gce.GetProjectID}, + {name: azure.CloudProviderName, accountIdName: "subscription_id", callback: azure.IsRunningOn, accountIdCallback: azure.GetSubscriptionID}, + } + + for _, cloudDetector := range detectors { + if cloudDetector.callback(ctx) { + log.Infof("Cloud provider %s detected", cloudDetector.name) + + accountID, err := cloudDetector.accountIdCallback(ctx) + if err != nil { + return "", "", fmt.Errorf("could not detect cloud provider account ID: %w", err) + } + + log.Infof("Detecting account id from %s cloud provider: %+q", cloudDetector.name, accountID) + + return cloudDetector.accountIdName, accountID, nil + } + } 
+ + return "", "", fmt.Errorf("no cloud provider detected") +} + +var accountIdTagCache struct { + sync.Once + value string +} + +func QueryAccountIdTag() string { + accountIdTagCache.Do(func() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + tagName, tagValue, err := queryAccountId(ctx) + if err != nil { + log.Errorf("failed to query account id: %v", err) + return + } + accountIdTagCache.value = fmt.Sprintf("%s:%s", tagName, tagValue) + }) + + return accountIdTagCache.value +} diff --git a/cmd/security-agent/command/logs_context.go b/pkg/security/common/logs_context.go similarity index 63% rename from cmd/security-agent/command/logs_context.go rename to pkg/security/common/logs_context.go index 8ef83f275f33d..cd7cee5e8d789 100644 --- a/cmd/security-agent/command/logs_context.go +++ b/pkg/security/common/logs_context.go @@ -3,33 +3,35 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package command +package common import ( - "github.com/DataDog/datadog-agent/comp/core/log" + "fmt" + pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/logs" "github.com/DataDog/datadog-agent/pkg/logs/client" logshttp "github.com/DataDog/datadog-agent/pkg/logs/client/http" logsconfig "github.com/DataDog/datadog-agent/pkg/logs/config" + "github.com/DataDog/datadog-agent/pkg/util/log" ) const ( cwsIntakeOrigin logsconfig.IntakeOrigin = "cloud-workload-security" ) -func NewLogContextCompliance(log log.Component) (*logsconfig.Endpoints, *client.DestinationsContext, error) { +func NewLogContextCompliance() (*logsconfig.Endpoints, *client.DestinationsContext, error) { logsConfigComplianceKeys := logsconfig.NewLogsConfigKeys("compliance_config.endpoints.", pkgconfig.Datadog) - return NewLogContext(log, logsConfigComplianceKeys, "cspm-intake.", "compliance", logsconfig.DefaultIntakeOrigin, logs.AgentJSONIntakeProtocol) + return NewLogContext(logsConfigComplianceKeys, "cspm-intake.", "compliance", logsconfig.DefaultIntakeOrigin, logs.AgentJSONIntakeProtocol) } // This function will only be used on Linux. 
The only platforms where the runtime agent runs -func NewLogContextRuntime(log log.Component) (*logsconfig.Endpoints, *client.DestinationsContext, error) { +func NewLogContextRuntime() (*logsconfig.Endpoints, *client.DestinationsContext, error) { logsRuntimeConfigKeys := logsconfig.NewLogsConfigKeys("runtime_security_config.endpoints.", pkgconfig.Datadog) - return NewLogContext(log, logsRuntimeConfigKeys, "runtime-security-http-intake.logs.", "logs", cwsIntakeOrigin, logsconfig.DefaultIntakeProtocol) + return NewLogContext(logsRuntimeConfigKeys, "runtime-security-http-intake.logs.", "logs", cwsIntakeOrigin, logsconfig.DefaultIntakeProtocol) } -func NewLogContext(log log.Component, logsConfig *logsconfig.LogsConfigKeys, endpointPrefix string, intakeTrackType logsconfig.IntakeTrackType, intakeOrigin logsconfig.IntakeOrigin, intakeProtocol logsconfig.IntakeProtocol) (*logsconfig.Endpoints, *client.DestinationsContext, error) { +func NewLogContext(logsConfig *logsconfig.LogsConfigKeys, endpointPrefix string, intakeTrackType logsconfig.IntakeTrackType, intakeOrigin logsconfig.IntakeOrigin, intakeProtocol logsconfig.IntakeProtocol) (*logsconfig.Endpoints, *client.DestinationsContext, error) { endpoints, err := logsconfig.BuildHTTPEndpointsWithConfig(logsConfig, endpointPrefix, intakeTrackType, intakeProtocol, intakeOrigin) if err != nil { endpoints, err = logsconfig.BuildHTTPEndpoints(intakeTrackType, intakeProtocol, intakeOrigin) @@ -40,7 +42,7 @@ func NewLogContext(log log.Component, logsConfig *logsconfig.LogsConfigKeys, end } if err != nil { - return nil, nil, log.Errorf("Invalid endpoints: %v", err) + return nil, nil, fmt.Errorf("invalid endpoints: %w", err) } for _, status := range endpoints.GetStatus() { diff --git a/pkg/security/config/config.go b/pkg/security/config/config.go index 1ec3014c52614..ceffaebb751b9 100644 --- a/pkg/security/config/config.go +++ b/pkg/security/config/config.go @@ -9,6 +9,7 @@ import ( "fmt" "time" + sysconfig 
"github.com/DataDog/datadog-agent/cmd/system-probe/config" coreconfig "github.com/DataDog/datadog-agent/pkg/config" logshttp "github.com/DataDog/datadog-agent/pkg/logs/client/http" logsconfig "github.com/DataDog/datadog-agent/pkg/logs/config" @@ -76,6 +77,9 @@ type RuntimeSecurityConfig struct { ActivityDumpTagsResolutionPeriod time.Duration // ActivityDumpLoadControlPeriod defines the period at which the activity dump manager should trigger the load controller ActivityDumpLoadControlPeriod time.Duration + // ActivityDumpLoadControlMinDumpTimeout defines minimal duration of a activity dump recording + ActivityDumpLoadControlMinDumpTimeout time.Duration + // ActivityDumpTracedCgroupsCount defines the maximum count of cgroups that should be monitored concurrently. Leave this parameter to 0 to prevent the generation // of activity dumps based on cgroups. ActivityDumpTracedCgroupsCount int @@ -126,6 +130,8 @@ type RuntimeSecurityConfig struct { SecurityProfileMaxCount int // SecurityProfileRCEnabled defines if remote-configuration is enabled SecurityProfileRCEnabled bool + // SecurityProfileDNSMatchMaxDepth defines the max depth of subdomain to be matched for DNS anomaly detection (0 to match everything) + SecurityProfileDNSMatchMaxDepth int // AnomalyDetectionEventTypes defines the list of events that should be allowed to generate anomaly detections AnomalyDetectionEventTypes []model.EventType @@ -142,9 +148,8 @@ type RuntimeSecurityConfig struct { // AnomalyDetectionWorkloadWarmupPeriod defines the duration we ignore the anomaly detections for // because of workload warm up AnomalyDetectionWorkloadWarmupPeriod time.Duration - // AnomalyDetectionRateLimiter limit number of anomaly event, one every N second - AnomalyDetectionRateLimiter int + AnomalyDetectionRateLimiter time.Duration // SBOMResolverEnabled defines if the SBOM resolver should be enabled SBOMResolverEnabled bool @@ -181,9 +186,8 @@ func NewConfig() (*Config, error) { } func NewRuntimeSecurityConfig() 
(*RuntimeSecurityConfig, error) { - dumpDuration := time.Duration(coreconfig.SystemProbe.GetInt("runtime_security_config.activity_dump.cgroup_dump_timeout")) * time.Minute - if dumpDuration == 0 { - dumpDuration = coreconfig.SystemProbe.GetDuration("runtime_security_config.activity_dump.dump_duration") + if !sysconfig.IsAdjusted(coreconfig.SystemProbe) { + sysconfig.Adjust(coreconfig.SystemProbe) } rsConfig := &RuntimeSecurityConfig{ @@ -215,9 +219,10 @@ func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) { ActivityDumpCleanupPeriod: coreconfig.SystemProbe.GetDuration("runtime_security_config.activity_dump.cleanup_period"), ActivityDumpTagsResolutionPeriod: coreconfig.SystemProbe.GetDuration("runtime_security_config.activity_dump.tags_resolution_period"), ActivityDumpLoadControlPeriod: coreconfig.SystemProbe.GetDuration("runtime_security_config.activity_dump.load_controller_period"), + ActivityDumpLoadControlMinDumpTimeout: coreconfig.SystemProbe.GetDuration("runtime_security_config.activity_dump.min_timeout"), ActivityDumpTracedCgroupsCount: coreconfig.SystemProbe.GetInt("runtime_security_config.activity_dump.traced_cgroups_count"), ActivityDumpTracedEventTypes: model.ParseEventTypeStringSlice(coreconfig.SystemProbe.GetStringSlice("runtime_security_config.activity_dump.traced_event_types")), - ActivityDumpCgroupDumpTimeout: dumpDuration, + ActivityDumpCgroupDumpTimeout: coreconfig.SystemProbe.GetDuration("runtime_security_config.activity_dump.dump_duration"), ActivityDumpRateLimiter: coreconfig.SystemProbe.GetInt("runtime_security_config.activity_dump.rate_limiter"), ActivityDumpCgroupWaitListTimeout: coreconfig.SystemProbe.GetDuration("runtime_security_config.activity_dump.cgroup_wait_list_timeout"), ActivityDumpCgroupDifferentiateArgs: coreconfig.SystemProbe.GetBool("runtime_security_config.activity_dump.cgroup_differentiate_args"), @@ -241,12 +246,13 @@ func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) { SBOMResolverWorkloadsCacheSize: 
coreconfig.SystemProbe.GetInt("runtime_security_config.sbom.workloads_cache_size"), // security profiles - SecurityProfileEnabled: coreconfig.SystemProbe.GetBool("runtime_security_config.security_profile.enabled"), - SecurityProfileDir: coreconfig.SystemProbe.GetString("runtime_security_config.security_profile.dir"), - SecurityProfileWatchDir: coreconfig.SystemProbe.GetBool("runtime_security_config.security_profile.watch_dir"), - SecurityProfileCacheSize: coreconfig.SystemProbe.GetInt("runtime_security_config.security_profile.cache_size"), - SecurityProfileMaxCount: coreconfig.SystemProbe.GetInt("runtime_security_config.security_profile.max_count"), - SecurityProfileRCEnabled: coreconfig.SystemProbe.GetBool("runtime_security_config.security_profile.remote_configuration.enabled"), + SecurityProfileEnabled: coreconfig.SystemProbe.GetBool("runtime_security_config.security_profile.enabled"), + SecurityProfileDir: coreconfig.SystemProbe.GetString("runtime_security_config.security_profile.dir"), + SecurityProfileWatchDir: coreconfig.SystemProbe.GetBool("runtime_security_config.security_profile.watch_dir"), + SecurityProfileCacheSize: coreconfig.SystemProbe.GetInt("runtime_security_config.security_profile.cache_size"), + SecurityProfileMaxCount: coreconfig.SystemProbe.GetInt("runtime_security_config.security_profile.max_count"), + SecurityProfileRCEnabled: coreconfig.SystemProbe.GetBool("runtime_security_config.security_profile.remote_configuration.enabled"), + SecurityProfileDNSMatchMaxDepth: coreconfig.SystemProbe.GetInt("runtime_security_config.security_profile.dns_match_max_depth"), // anomaly detection AnomalyDetectionEventTypes: model.ParseEventTypeStringSlice(coreconfig.SystemProbe.GetStringSlice("runtime_security_config.security_profile.anomaly_detection.event_types")), @@ -254,7 +260,7 @@ func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) { AnomalyDetectionWorkloadWarmupPeriod: 
coreconfig.SystemProbe.GetDuration("runtime_security_config.security_profile.anomaly_detection.workload_warmup_period"), AnomalyDetectionUnstableProfileTimeThreshold: coreconfig.SystemProbe.GetDuration("runtime_security_config.security_profile.anomaly_detection.unstable_profile_time_threshold"), AnomalyDetectionUnstableProfileSizeThreshold: coreconfig.SystemProbe.GetInt64("runtime_security_config.security_profile.anomaly_detection.unstable_profile_size_threshold"), - AnomalyDetectionRateLimiter: coreconfig.SystemProbe.GetInt("runtime_security_config.security_profile.anomaly_detection.rate_limiter"), + AnomalyDetectionRateLimiter: coreconfig.SystemProbe.GetDuration("runtime_security_config.security_profile.anomaly_detection.rate_limiter"), } if err := rsConfig.sanitize(); err != nil { @@ -271,16 +277,6 @@ func (c *RuntimeSecurityConfig) IsRuntimeEnabled() bool { // sanitize ensures that the configuration is properly setup func (c *RuntimeSecurityConfig) sanitize() error { - // if runtime is enabled then we force fim - if c.RuntimeEnabled { - c.FIMEnabled = true - } - - // if runtime is disabled then we force disable activity dumps - if !c.RuntimeEnabled { - c.ActivityDumpEnabled = false - } - serviceName := utils.GetTagValue("service", coreconfig.GetGlobalConfiguredTags(true)) if len(serviceName) > 0 { c.HostServiceName = fmt.Sprintf("service:%s", serviceName) diff --git a/pkg/security/ebpf/c/include/helpers/exec.h b/pkg/security/ebpf/c/include/helpers/exec.h index 1e84fe4e7f0bb..42535d8b431e6 100644 --- a/pkg/security/ebpf/c/include/helpers/exec.h +++ b/pkg/security/ebpf/c/include/helpers/exec.h @@ -20,54 +20,6 @@ int __attribute__((always_inline)) handle_exec_event(struct pt_regs *ctx, struct syscall->exec.file.path_key.mount_id = mount_id; syscall->exec.file.path_key.path_id = get_path_id(0); - u64 pid_tgid = bpf_get_current_pid_tgid(); - u32 tgid = pid_tgid >> 32; - - struct proc_cache_t pc = { - .entry = { - .executable = { - .path_key = { - .ino = 
syscall->exec.file.path_key.ino, - .mount_id = mount_id, - .path_id = syscall->exec.file.path_key.path_id, - }, - .flags = syscall->exec.file.flags - }, - .exec_timestamp = bpf_ktime_get_ns(), - }, - .container = {}, - }; - fill_file_metadata(syscall->exec.dentry, &pc.entry.executable.metadata); - bpf_get_current_comm(&pc.entry.comm, sizeof(pc.entry.comm)); - - // select the previous cookie entry in cache of the current process - // (this entry was created by the fork of the current process) - struct pid_cache_t *fork_entry = (struct pid_cache_t *) bpf_map_lookup_elem(&pid_cache, &tgid); - if (fork_entry) { - // Fetch the parent proc cache entry - u32 parent_cookie = fork_entry->cookie; - struct proc_cache_t *parent_pc = get_proc_from_cookie(parent_cookie); - if (parent_pc) { - // inherit the parent container context - fill_container_context(parent_pc, &pc.container); - } - } - - // Insert new proc cache entry (Note: do not move the order of this block with the previous one, we need to inherit - // the container ID before saving the entry in proc_cache. Modifying entry after insertion won't work.) 
- u32 cookie = bpf_get_prandom_u32(); - bpf_map_update_elem(&proc_cache, &cookie, &pc, BPF_ANY); - - // update pid <-> cookie mapping - if (fork_entry) { - fork_entry->cookie = cookie; - } else { - struct pid_cache_t new_pid_entry = { - .cookie = cookie, - }; - bpf_map_update_elem(&pid_cache, &tgid, &new_pid_entry, BPF_ANY); - } - // resolve dentry syscall->resolver.key = syscall->exec.file.path_key; syscall->resolver.dentry = syscall->exec.dentry; diff --git a/pkg/security/ebpf/c/include/helpers/process.h b/pkg/security/ebpf/c/include/helpers/process.h index 6210ce577d5fb..85d15c1a758be 100644 --- a/pkg/security/ebpf/c/include/helpers/process.h +++ b/pkg/security/ebpf/c/include/helpers/process.h @@ -18,16 +18,16 @@ static __attribute__((always_inline)) u32 copy_tty_name(const char src[TTY_NAME_ return TTY_NAME_LEN; } -void __attribute__((always_inline)) copy_proc_entry_except_comm(struct process_entry_t* src, struct process_entry_t* dst) { +void __attribute__((always_inline)) copy_proc_entry(struct process_entry_t* src, struct process_entry_t* dst) { dst->executable = src->executable; dst->exec_timestamp = src->exec_timestamp; copy_tty_name(src->tty_name, dst->tty_name); + bpf_probe_read(dst->comm, TASK_COMM_LEN, src->comm); } void __attribute__((always_inline)) copy_proc_cache(struct proc_cache_t *src, struct proc_cache_t *dst) { copy_container_id(src->container.container_id, dst->container.container_id); - copy_proc_entry_except_comm(&src->entry, &dst->entry); - bpf_probe_read(dst->entry.comm, TASK_COMM_LEN, src->entry.comm); + copy_proc_entry(&src->entry, &dst->entry); } void __attribute__((always_inline)) copy_credentials(struct credentials_t* src, struct credentials_t* dst) { diff --git a/pkg/security/ebpf/c/include/hooks/exec.h b/pkg/security/ebpf/c/include/hooks/exec.h index 97aa382492107..1117f22a2d4bd 100644 --- a/pkg/security/ebpf/c/include/hooks/exec.h +++ b/pkg/security/ebpf/c/include/hooks/exec.h @@ -191,7 +191,6 @@ int sched_process_fork(struct 
_tracepoint_sched_process_fork *args) { event->pid_entry.fork_timestamp = ts; - bpf_get_current_comm(event->proc_entry.comm, sizeof(event->proc_entry.comm)); struct process_context_t *on_stack_process = &event->process; fill_process_context(on_stack_process); fill_span_context(&event->span); @@ -222,7 +221,7 @@ int sched_process_fork(struct _tracepoint_sched_process_fork *args) { struct proc_cache_t *parent_pc = get_proc_from_cookie(on_stack_cookie); if (parent_pc) { fill_container_context(parent_pc, &event->container); - copy_proc_entry_except_comm(&parent_pc->entry, &event->proc_entry); + copy_proc_entry(&parent_pc->entry, &event->proc_entry); } } @@ -614,41 +613,83 @@ int __attribute__((always_inline)) send_exec_event(struct pt_regs *ctx) { bpf_map_delete_elem(&exec_pid_transfer, &tgid); - struct pid_cache_t *pid_entry = (struct pid_cache_t *) bpf_map_lookup_elem(&pid_cache, &tgid); - if (pid_entry) { - u32 cookie = pid_entry->cookie; - struct proc_cache_t *pc = bpf_map_lookup_elem(&proc_cache, &cookie); - if (pc) { - struct process_event_t *event = new_process_event(0); - if (event == NULL) { - return 0; - } + struct proc_cache_t pc = { + .entry = { + .executable = { + .path_key = { + .ino = syscall->exec.file.path_key.ino, + .mount_id = syscall->exec.file.path_key.mount_id, + .path_id = syscall->exec.file.path_key.path_id, + }, + .flags = syscall->exec.file.flags + }, + .exec_timestamp = bpf_ktime_get_ns(), + }, + .container = {}, + }; + fill_file_metadata(syscall->exec.dentry, &pc.entry.executable.metadata); + bpf_get_current_comm(&pc.entry.comm, sizeof(pc.entry.comm)); + + // select the previous cookie entry in cache of the current process + // (this entry was created by the fork of the current process) + struct pid_cache_t *fork_entry = (struct pid_cache_t *) bpf_map_lookup_elem(&pid_cache, &tgid); + if (fork_entry) { + // Fetch the parent proc cache entry + u32 parent_cookie = fork_entry->cookie; + struct proc_cache_t *parent_pc = 
get_proc_from_cookie(parent_cookie); + if (parent_pc) { + // inherit the parent container context + fill_container_context(parent_pc, &pc.container); + } + } - // copy proc_cache data - fill_container_context(pc, &event->container); - copy_proc_entry_except_comm(&pc->entry, &event->proc_entry); - bpf_get_current_comm(&event->proc_entry.comm, sizeof(event->proc_entry.comm)); + // Insert new proc cache entry (Note: do not move the order of this block with the previous one, we need to inherit + // the container ID before saving the entry in proc_cache. Modifying entry after insertion won't work.) + u32 cookie = bpf_get_prandom_u32(); + bpf_map_update_elem(&proc_cache, &cookie, &pc, BPF_ANY); - // copy pid_cache entry data - copy_pid_cache_except_exit_ts(pid_entry, &event->pid_entry); + // update pid <-> cookie mapping + if (fork_entry) { + fork_entry->cookie = cookie; + } else { + struct pid_cache_t new_pid_entry = { + .cookie = cookie, + }; + bpf_map_update_elem(&pid_cache, &tgid, &new_pid_entry, BPF_ANY); + fork_entry = (struct pid_cache_t *) bpf_map_lookup_elem(&pid_cache, &tgid); + if (fork_entry == NULL) { + // should never happen, ignore + return 0; + } + } - // add pid / tid context - struct process_context_t *on_stack_process = &event->process; - fill_process_context(on_stack_process); + struct process_event_t *event = new_process_event(0); + if (event == NULL) { + return 0; + } - copy_span_context(&syscall->exec.span_context, &event->span); - fill_args_envs(event, syscall); + // copy proc_cache data + fill_container_context(&pc, &event->container); + copy_proc_entry(&pc.entry, &event->proc_entry); - // [activity_dump] check if this process should be traced - should_trace_new_process(ctx, now, tgid, event->container.container_id, event->proc_entry.comm); + // copy pid_cache entry data + copy_pid_cache_except_exit_ts(fork_entry, &event->pid_entry); - // add interpreter path info - event->linux_binprm.interpreter = syscall->exec.linux_binprm.interpreter; + // 
add pid / tid context + struct process_context_t *on_stack_process = &event->process; + fill_process_context(on_stack_process); - // send the entry to maintain userspace cache - send_event_ptr(ctx, EVENT_EXEC, event); - } - } + copy_span_context(&syscall->exec.span_context, &event->span); + fill_args_envs(event, syscall); + + // [activity_dump] check if this process should be traced + should_trace_new_process(ctx, now, tgid, event->container.container_id, event->proc_entry.comm); + + // add interpreter path info + event->linux_binprm.interpreter = syscall->exec.linux_binprm.interpreter; + + // send the entry to maintain userspace cache + send_event_ptr(ctx, EVENT_EXEC, event); return 0; } diff --git a/pkg/security/ebpf/c/include/maps.h b/pkg/security/ebpf/c/include/maps.h index 42e46a20f7e2f..5af3f79242ff8 100644 --- a/pkg/security/ebpf/c/include/maps.h +++ b/pkg/security/ebpf/c/include/maps.h @@ -46,8 +46,6 @@ BPF_LRU_MAP(syscalls, u64, struct syscall_cache_t, 1024) BPF_LRU_MAP(proc_cache, u32, struct proc_cache_t, 16384) BPF_LRU_MAP(pid_cache, u32, struct pid_cache_t, 16384) BPF_LRU_MAP(pid_ignored, u32, u32, 16738) -BPF_LRU_MAP(exec_count_fb, struct exec_path, u64, 2048) -BPF_LRU_MAP(exec_count_bb, struct exec_path, u64, 2048) BPF_LRU_MAP(exec_pid_transfer, u32, u64, 512) BPF_LRU_MAP(netns_cache, u32, u32, 40960) BPF_LRU_MAP(span_tls, u32, struct span_tls_t, 4096) diff --git a/pkg/security/ebpf/c/include/structs/process.h b/pkg/security/ebpf/c/include/structs/process.h index 895c1a32ed37e..c484bef34585a 100644 --- a/pkg/security/ebpf/c/include/structs/process.h +++ b/pkg/security/ebpf/c/include/structs/process.h @@ -61,10 +61,6 @@ struct str_array_buffer_t { char value[MAX_STR_BUFF_LEN]; }; -struct exec_path { - char filename[MAX_PATH_LEN]; -}; - union selinux_write_payload_t { // 1 for true, 0 for false, -1 (max) for error u32 bool_value; diff --git a/pkg/security/ebpf/compile_test.go b/pkg/security/ebpf/compile_test.go index 26098dc685e2d..771e993f277a4 100644 
--- a/pkg/security/ebpf/compile_test.go +++ b/pkg/security/ebpf/compile_test.go @@ -13,15 +13,18 @@ import ( "github.com/stretchr/testify/require" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/security/probe/config" ) func TestLoaderCompile(t *testing.T) { - _, err := sysconfig.New("") - require.NoError(t, err) - cfg, err := config.NewConfig() - require.NoError(t, err) - out, err := getRuntimeCompiledPrograms(cfg, false, false, nil) - require.NoError(t, err) - _ = out.Close() + ebpftest.TestBuildMode(t, ebpftest.RuntimeCompiled, "", func(t *testing.T) { + _, err := sysconfig.New("") + require.NoError(t, err) + cfg, err := config.NewConfig() + require.NoError(t, err) + out, err := getRuntimeCompiledPrograms(cfg, false, false, nil) + require.NoError(t, err) + _ = out.Close() + }) } diff --git a/pkg/security/ebpf/kernel/kernel.go b/pkg/security/ebpf/kernel/kernel.go index 9976c012c280e..32261f3f5b063 100644 --- a/pkg/security/ebpf/kernel/kernel.go +++ b/pkg/security/ebpf/kernel/kernel.go @@ -81,6 +81,12 @@ var ( Kernel5_15 = kernel.VersionCode(5, 15, 0) // Kernel5_16 is the KernelVersion representation of kernel version 5.16 Kernel5_16 = kernel.VersionCode(5, 16, 0) + // Kernel5_17 is the KernelVersion representation of kernel version 5.17 + Kernel5_17 = kernel.VersionCode(5, 17, 0) + // Kernel5_18 is the KernelVersion representation of kernel version 5.18 + Kernel5_18 = kernel.VersionCode(5, 18, 0) + // Kernel5_19 is the KernelVersion representation of kernel version 5.19 + Kernel5_19 = kernel.VersionCode(5, 19, 0) ) // Version defines a kernel version helper diff --git a/pkg/security/events/custom.go b/pkg/security/events/custom.go index 14d854d44857b..b232ccabcae8d 100644 --- a/pkg/security/events/custom.go +++ b/pkg/security/events/custom.go @@ -22,20 +22,43 @@ const ( // LostEventsRuleID is the rule ID for the lost_events_* events LostEventsRuleID = 
"lost_events" + //LostEventsRuleDesc is the rule description for the lost_events_* events + LostEventsRuleDesc = "Lost events" + // RulesetLoadedRuleID is the rule ID for the ruleset_loaded events RulesetLoadedRuleID = "ruleset_loaded" + // RulesetLoadedRuleDesc is the rule description for the ruleset_loaded events + RulesetLoadedRuleDesc = "New ruleset loaded" + // NoisyProcessRuleID is the rule ID for the noisy_process events NoisyProcessRuleID = "noisy_process" + // NoisyProcessRuleDesc is the rule description for the noisy_process events + NoisyProcessRuleDesc = "Noisy process detected" + // AbnormalPathRuleID is the rule ID for the abnormal_path events AbnormalPathRuleID = "abnormal_path" + // AbnormalPathRuleDesc is the rule description for the abnormal_path events + AbnormalPathRuleDesc = "Abnormal path detected" + // SelfTestRuleID is the rule ID for the self_test events SelfTestRuleID = "self_test" + // SelfTestRuleDesc is the rule description for the self_test events + SelfTestRuleDesc = "Self tests result" + // AnomalyDetectionRuleID is the rule ID for anomaly_detection events AnomalyDetectionRuleID = "anomaly_detection" + // AnomalyDetectionRuleID is the rule description for anomaly_detection events + AnomalyDetectionRuleDesc = "Anomaly detection" + // NoProcessContextErrorRuleID is the rule ID for events without process context NoProcessContextErrorRuleID = "no_process_context" + // NoProcessContextErrorRuleDesc is the rule description for events without process context + NoProcessContextErrorRuleDesc = "No process context detected" + // BrokenProcessLineageErrorRuleID is the rule ID for events with a broken process lineage BrokenProcessLineageErrorRuleID = "broken_process_lineage" + // BrokenProcessLineageErrorRuleDesc is the rule description for events with a broken process lineage + BrokenProcessLineageErrorRuleDesc = "Broken process lineage detected" ) type CustomEventCommonFields struct { @@ -49,10 +72,10 @@ func (commonFields 
*CustomEventCommonFields) FillCustomEventCommonFields() { } // NewCustomRule returns a new custom rule -func NewCustomRule(id eval.RuleID) *rules.Rule { +func NewCustomRule(id eval.RuleID, description string) *rules.Rule { return &rules.Rule{ Rule: &eval.Rule{ID: id}, - Definition: &rules.RuleDefinition{ID: id}, + Definition: &rules.RuleDefinition{ID: id, Description: description}, } } diff --git a/pkg/security/metrics/metrics.go b/pkg/security/metrics/metrics.go index ed9ea79fd201f..1c0d115e1d9f3 100644 --- a/pkg/security/metrics/metrics.go +++ b/pkg/security/metrics/metrics.go @@ -192,6 +192,9 @@ var ( // bytes // Tags: format, storage_type, compression MetricActivityDumpSizeInBytes = newRuntimeMetric(".activity_dump.size_in_bytes") + // MetricActivityDumpPersistedDumps is the name of the metric used to reported the number of dumps that were persisted + // Tags: format, storage_type, compression + MetricActivityDumpPersistedDumps = newRuntimeMetric(".activity_dump.persisted_dumps") // MetricActivityDumpActiveDumps is the name of the metric used to report the number of active dumps // Tags: - MetricActivityDumpActiveDumps = newRuntimeMetric(".activity_dump.active_dumps") @@ -238,9 +241,9 @@ var ( // Security Profile metrics - // MetricSecurityProfileActiveProfiles is the name of the metric used to report the count of active Security Profiles - // Tags: - - MetricSecurityProfileActiveProfiles = newRuntimeMetric(".security_profile.active_profiles") + // MetricSecurityProfileProfiles is the name of the metric used to report the count of Security Profiles per category + // Tags: in_kernel (true or false), anomaly_detection (true or false), auto_suppression (true or false), workload_hardening (true or false) + MetricSecurityProfileProfiles = newRuntimeMetric(".security_profile.profiles") // MetricSecurityProfileCacheLen is the name of the metric used to report the size of the Security Profile cache // Tags: - MetricSecurityProfileCacheLen = 
newRuntimeMetric(".security_profile.cache.len") @@ -251,10 +254,10 @@ var ( // Tags: - MetricSecurityProfileCacheMiss = newRuntimeMetric(".security_profile.cache.miss") // MetricSecurityProfileAnomalyDetectionSent - // Tags: - event_type + // Tags: event_type MetricSecurityProfileAnomalyDetectionSent = newRuntimeMetric(".security_profile.anomaly_detection.sent") // MetricSecurityProfileEventFiltering - // Tags: - event_type, in_profile ('true', 'false', 'no_profile' or 'unstable_profile') + // Tags: event_type, profile_state ('no_profile', 'unstable', 'unstable_event_type', 'stable', 'auto_learning', 'workload_warmup'), in_profile ('true', 'false' or none) MetricSecurityProfileEventFiltering = newRuntimeMetric(".security_profile.evaluation.hit") // Namespace resolver metrics diff --git a/pkg/security/module/cws.go b/pkg/security/module/cws.go index 17435e54df45e..f8dec3954f13d 100644 --- a/pkg/security/module/cws.go +++ b/pkg/security/module/cws.go @@ -398,6 +398,10 @@ func (c *CWSConsumer) Stop() { signal.Stop(c.sigupChan) close(c.sigupChan) + if c.apiServer != nil { + c.apiServer.Stop() + } + for _, provider := range c.policyProviders { _ = provider.Close() } diff --git a/pkg/security/module/policy_monitor.go b/pkg/security/module/policy_monitor.go index e4dcf26379729..e032375423a66 100644 --- a/pkg/security/module/policy_monitor.go +++ b/pkg/security/module/policy_monitor.go @@ -231,6 +231,6 @@ func NewRuleSetLoadedEvent(ruleSets map[string]*rules.RuleSet, err *multierror.E } evt.FillCustomEventCommonFields() - return events.NewCustomRule(events.RulesetLoadedRuleID), + return events.NewCustomRule(events.RulesetLoadedRuleID, events.RulesetLoadedRuleDesc), events.NewCustomEvent(model.CustomRulesetLoadedEventType, evt) } diff --git a/pkg/security/module/rate_limiter.go b/pkg/security/module/rate_limiter.go index e41ee34dfdfd6..099f76707380c 100644 --- a/pkg/security/module/rate_limiter.go +++ b/pkg/security/module/rate_limiter.go @@ -169,7 +169,7 @@ func (rl 
*RateLimiter) applyBaseLimitersFromDefault(limiters map[string]Limiter) for id, limiter := range defaultPerRuleLimiters { limiters[id] = limiter } - limiters[events.AnomalyDetectionRuleID] = NewAnomalyDetectionLimiter(rate.Every(time.Duration(rl.config.AnomalyDetectionRateLimiter)*time.Second), 1) + limiters[events.AnomalyDetectionRuleID] = NewAnomalyDetectionLimiter(rate.Every(rl.config.AnomalyDetectionRateLimiter), 1) } // Apply a set of rules @@ -226,8 +226,9 @@ func (rl *RateLimiter) GetStats() map[string][]LimiterStat { // for the set of rules func (rl *RateLimiter) SendStats() error { for ruleID, stats := range rl.GetStats() { - tags := []string{fmt.Sprintf("rule_id:%s", ruleID)} + ruleIDTag := fmt.Sprintf("rule_id:%s", ruleID) for _, stat := range stats { + tags := []string{ruleIDTag} if len(stat.tags) > 0 { tags = append(tags, stat.tags...) } diff --git a/pkg/security/module/self_tests.go b/pkg/security/module/self_tests.go index da92f41369062..daa354bc39287 100644 --- a/pkg/security/module/self_tests.go +++ b/pkg/security/module/self_tests.go @@ -34,7 +34,7 @@ func NewSelfTestEvent(success []string, fails []string) (*rules.Rule, *events.Cu } evt.FillCustomEventCommonFields() - return events.NewCustomRule(events.SelfTestRuleID), + return events.NewCustomRule(events.SelfTestRuleID, events.SelfTestRuleDesc), events.NewCustomEvent(model.CustomSelfTestEventType, evt) } diff --git a/pkg/security/module/server.go b/pkg/security/module/server.go index 1e0e1f81b505e..7b996c3c115d1 100644 --- a/pkg/security/module/server.go +++ b/pkg/security/module/server.go @@ -22,15 +22,20 @@ import ( "go.uber.org/atomic" "golang.org/x/time/rate" + pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/security/common" "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/metrics" sprobe "github.com/DataDog/datadog-agent/pkg/security/probe" 
"github.com/DataDog/datadog-agent/pkg/security/proto/api" + "github.com/DataDog/datadog-agent/pkg/security/reporter" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/seclog" "github.com/DataDog/datadog-agent/pkg/security/serializers" "github.com/DataDog/datadog-agent/pkg/util/kernel" + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/util/startstop" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -48,6 +53,7 @@ type pendingMsg struct { type APIServer struct { api.UnimplementedSecurityModuleServer msgs chan *api.SecurityEventMessage + directReporter common.RawReporter activityDumps chan *api.ActivityDumpStreamMessage expiredEventsLock sync.RWMutex expiredEvents map[rules.RuleID]*atomic.Int64 @@ -60,6 +66,8 @@ type APIServer struct { retention time.Duration cfg *config.RuntimeSecurityConfig cwsConsumer *CWSConsumer + + stopper startstop.Stopper } // GetActivityDumpStream waits for activity dumps and forwards them to the stream @@ -213,6 +221,32 @@ func (a *APIServer) TranscodingRequest(ctx context.Context, params *api.Transcod return nil, fmt.Errorf("monitor not configured") } +// ListSecurityProfiles returns the list of security profiles +func (a *APIServer) ListSecurityProfiles(ctx context.Context, params *api.SecurityProfileListParams) (*api.SecurityProfileListMessage, error) { + if monitor := a.probe.GetMonitor(); monitor != nil { + msg, err := monitor.ListSecurityProfiles(params) + if err != nil { + seclog.Errorf(err.Error()) + } + return msg, nil + } + + return nil, fmt.Errorf("monitor not configured") +} + +// SaveSecurityProfile saves the requested security profile to disk +func (a *APIServer) SaveSecurityProfile(ctx context.Context, params *api.SecurityProfileSaveParams) (*api.SecurityProfileSaveMessage, error) { + if monitor := a.probe.GetMonitor(); monitor != nil { + msg, err := 
monitor.SaveSecurityProfile(params) + if err != nil { + seclog.Errorf(err.Error()) + } + return msg, nil + } + + return nil, fmt.Errorf("monitor not configured") +} + // GetStatus returns the status of the module func (a *APIServer) GetStatus(ctx context.Context, params *api.GetStatusParams) (*api.Status, error) { status, err := a.probe.GetConstantFetcherStatus() @@ -323,23 +357,10 @@ func (a *APIServer) start(ctx context.Context) { Tags: msg.tags, } - select { - case a.msgs <- m: - break - default: - // The channel is full, consume the oldest event - oldestMsg := <-a.msgs - // Try to send the event again - select { - case a.msgs <- m: - break - default: - // Looks like the channel is full again, expire the current message too - a.expireEvent(m) - break - } - a.expireEvent(oldestMsg) - break + if a.directReporter != nil { + a.sendDirectly(m) + } else { + a.sendToSecurityAgent(m) } }) case <-ctx.Done(): @@ -348,6 +369,31 @@ func (a *APIServer) start(ctx context.Context) { } } +func (a *APIServer) sendToSecurityAgent(m *api.SecurityEventMessage) { + select { + case a.msgs <- m: + break + default: + // The channel is full, consume the oldest event + oldestMsg := <-a.msgs + // Try to send the event again + select { + case a.msgs <- m: + break + default: + // Looks like the channel is full again, expire the current message too + a.expireEvent(m) + break + } + a.expireEvent(oldestMsg) + break + } +} + +func (a *APIServer) sendDirectly(m *api.SecurityEventMessage) { + a.directReporter.ReportRaw(m.Data, m.Service, m.Tags...) +} + // Start the api server, starts to consume the msg queue func (a *APIServer) Start(ctx context.Context) { go a.start(ctx) @@ -436,6 +482,7 @@ func (a *APIServer) SendEvent(rule *rules.Rule, event Event, extTagsCb func() [] msg.tags = append(msg.tags, "rule_id:"+rule.Definition.ID) msg.tags = append(msg.tags, rule.Tags...) msg.tags = append(msg.tags, event.GetTags()...) 
+ msg.tags = append(msg.tags, common.QueryAccountIdTag()) a.enqueue(msg) } @@ -521,18 +568,56 @@ func (a *APIServer) Apply(ruleIDs []rules.RuleID) { } } +func (a *APIServer) Stop() { + a.stopper.Stop() +} + // NewAPIServer returns a new gRPC event server func NewAPIServer(cfg *config.RuntimeSecurityConfig, probe *sprobe.Probe, client statsd.ClientInterface) *APIServer { + stopper := startstop.NewSerialStopper() + directReporter, err := newDirectReporter(stopper) + if err != nil { + log.Errorf("failed to setup direct reporter: %v", err) + directReporter = nil + } + es := &APIServer{ - msgs: make(chan *api.SecurityEventMessage, cfg.EventServerBurst*3), - activityDumps: make(chan *api.ActivityDumpStreamMessage, model.MaxTracedCgroupsCount*2), - expiredEvents: make(map[rules.RuleID]*atomic.Int64), - expiredDumps: atomic.NewInt64(0), - limiter: NewStdLimiter(rate.Limit(cfg.EventServerRate), cfg.EventServerBurst), - statsdClient: client, - probe: probe, - retention: cfg.EventServerRetention, - cfg: cfg, + msgs: make(chan *api.SecurityEventMessage, cfg.EventServerBurst*3), + directReporter: directReporter, + activityDumps: make(chan *api.ActivityDumpStreamMessage, model.MaxTracedCgroupsCount*2), + expiredEvents: make(map[rules.RuleID]*atomic.Int64), + expiredDumps: atomic.NewInt64(0), + limiter: NewStdLimiter(rate.Limit(cfg.EventServerRate), cfg.EventServerBurst), + statsdClient: client, + probe: probe, + retention: cfg.EventServerRetention, + cfg: cfg, + stopper: stopper, } return es } + +func newDirectReporter(stopper startstop.Stopper) (common.RawReporter, error) { + directReportEnabled := pkgconfig.SystemProbe.GetBool("runtime_security_config.direct_send_from_system_probe") + if !directReportEnabled { + return nil, nil + } + + runPath := pkgconfig.Datadog.GetString("runtime_security_config.run_path") + + endpoints, destinationsCtx, err := common.NewLogContextRuntime() + if err != nil { + return nil, fmt.Errorf("failed to create direct reported endpoints: %w", err) + } 
+ + for _, status := range endpoints.GetStatus() { + log.Info(status) + } + + reporter, err := reporter.NewCWSReporter(runPath, stopper, endpoints, destinationsCtx) + if err != nil { + return nil, fmt.Errorf("failed to create direct reporter: %w", err) + } + + return reporter, nil +} diff --git a/pkg/security/probe/config/config.go b/pkg/security/probe/config/config.go index b3477b8632dc6..2505d5fb7fa0a 100644 --- a/pkg/security/probe/config/config.go +++ b/pkg/security/probe/config/config.go @@ -11,6 +11,7 @@ import ( "strings" "time" + sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" coreconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/process/util" @@ -136,6 +137,10 @@ type Config struct { // NewConfig returns a new Config object func NewConfig() (*Config, error) { + if !sysconfig.IsAdjusted(coreconfig.SystemProbe) { + sysconfig.Adjust(coreconfig.SystemProbe) + } + setEnv() c := &Config{ diff --git a/pkg/security/probe/constantfetch/btfhub/constants.json b/pkg/security/probe/constantfetch/btfhub/constants.json index b7b1ae7e574f9..16b26a6977689 100644 --- a/pkg/security/probe/constantfetch/btfhub/constants.json +++ b/pkg/security/probe/constantfetch/btfhub/constants.json @@ -1,5 +1,5 @@ { - "commit": "268bd979293a6d1b42ce8eb4b597c8bc964896c3", + "commit": "8cf236a3f30f50eb9460626ca07e1d5cd02e7dc4", "constants": [ { "binprm_file_offset": 168, @@ -29078,6 +29078,13 @@ "uname_release": "4.15.0-1150-azure", "cindex": 140 }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "x86_64", + "uname_release": "4.15.0-1150-gcp", + "cindex": 115 + }, { "distrib": "ubuntu", "version": "18.04", @@ -29176,6 +29183,13 @@ "uname_release": "4.15.0-1164-azure", "cindex": 140 }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "x86_64", + "uname_release": "4.15.0-1165-azure", + "cindex": 140 + }, { "distrib": "ubuntu", "version": "18.04", diff --git 
a/pkg/security/probe/constantfetch/fallback.go b/pkg/security/probe/constantfetch/fallback.go index 86e8c2ddce9ed..a3b73dd5d5637 100644 --- a/pkg/security/probe/constantfetch/fallback.go +++ b/pkg/security/probe/constantfetch/fallback.go @@ -177,7 +177,11 @@ func getSizeOfStructInode(kv *kernel.Version) uint64 { case kv.IsAmazonLinuxKernel() && kv.IsInRangeCloseOpen(kernel.Kernel5_4, kernel.Kernel5_5): sizeOf = 584 case kv.IsAmazonLinuxKernel() && kv.IsInRangeCloseOpen(kernel.Kernel5_10, kernel.Kernel5_11): - sizeOf = 584 + if kv.Code.Patch() > 100 { + sizeOf = 592 + } else { + sizeOf = 584 + } case kv.IsAmazonLinuxKernel() && kv.IsInRangeCloseOpen(kernel.Kernel5_15, kernel.Kernel5_16): sizeOf = 616 case kv.IsInRangeCloseOpen(kernel.Kernel4_15, kernel.Kernel4_16): @@ -242,8 +246,10 @@ func getSignalTTYOffset(kv *kernel.Version) uint64 { return 368 case kv.IsUbuntuKernel() && kv.IsInRangeCloseOpen(kernel.Kernel4_16, kernel.Kernel4_19): return 376 - case kv.IsUbuntuKernel(): + case kv.IsUbuntuKernel() && kv.Code < kernel.Kernel5_19: return 400 + getNoHzOffset() + case kv.IsUbuntuKernel() && kv.Code >= kernel.Kernel5_19: + return 408 + getNoHzOffset() case kv.Code >= kernel.Kernel5_16: return 416 } @@ -291,8 +297,10 @@ func getBpfMapIDOffset(kv *kernel.Version) uint64 { switch { case kv.IsInRangeCloseOpen(kernel.Kernel5_15, kernel.Kernel5_16): return 52 - case kv.Code >= kernel.Kernel5_16: + case kv.IsInRangeCloseOpen(kernel.Kernel5_16, kernel.Kernel5_19): return 60 + case kv.Code >= kernel.Kernel5_19: + return 68 default: return 48 } @@ -331,8 +339,10 @@ func getBpfMapNameOffset(kv *kernel.Version) uint64 { nameOffset = 80 case kv.IsInRangeCloseOpen(kernel.Kernel5_15, kernel.Kernel5_16): nameOffset = 88 - case kv.Code >= kernel.Kernel5_16: + case kv.IsInRangeCloseOpen(kernel.Kernel5_16, kernel.Kernel5_19): nameOffset = 96 + case kv.Code >= kernel.Kernel5_19: + nameOffset = 104 case kv.Code != 0 && kv.Code < kernel.Kernel4_15: return ErrorSentinel } @@ -444,8 +454,10 
@@ func getBpfProgAuxNameOffset(kv *kernel.Version) uint64 { nameOffset = 504 case kv.IsInRangeCloseOpen(kernel.Kernel5_13, kernel.Kernel5_16): nameOffset = 528 - case kv.Code != 0 && kv.Code >= kernel.Kernel5_16: + case kv.IsInRangeCloseOpen(kernel.Kernel5_16, kernel.Kernel5_17): nameOffset = 544 + case kv.Code != 0 && kv.Code >= kernel.Kernel5_17: + nameOffset = 528 } return nameOffset @@ -633,12 +645,14 @@ func getNetDeviceIfindexOffset(kv *kernel.Version) uint64 { case kv.IsSuse15Kernel(): offset = 256 - case kv.Code >= kernel.Kernel4_14 && kv.Code < kernel.Kernel5_8: + case kv.IsInRangeCloseOpen(kernel.Kernel4_14, kernel.Kernel5_8): offset = 264 - case kv.Code >= kernel.Kernel5_8 && kv.Code < kernel.Kernel5_12: + case kv.IsInRangeCloseOpen(kernel.Kernel5_8, kernel.Kernel5_12): offset = 256 - case kv.Code >= kernel.Kernel5_12: + case kv.IsInRangeCloseOpen(kernel.Kernel5_12, kernel.Kernel5_17): offset = 208 + case kv.Code >= kernel.Kernel5_17: + offset = 216 } return offset @@ -709,16 +723,16 @@ func getSocketSockOffset(kv *kernel.Version) uint64 { } func getNFConnCTNetOffset(kv *kernel.Version) uint64 { - offset := uint64(144) - switch { case kv.IsCOSKernel(): - offset = 168 + return 168 case kv.IsRH7Kernel(): - offset = 240 + return 240 + case kv.Code >= kernel.Kernel5_19: + return 136 + default: + return 144 } - - return offset } func getSockCommonSKCFamilyOffset(kv *kernel.Version) uint64 { @@ -738,8 +752,10 @@ func getFlowi4SAddrOffset(kv *kernel.Version) uint64 { case kv.IsInRangeCloseOpen(kernel.Kernel5_0, kernel.Kernel5_1): offset = 32 - case kv.Code >= kernel.Kernel5_1: + case kv.IsInRangeCloseOpen(kernel.Kernel5_1, kernel.Kernel5_18): offset = 40 + case kv.Code >= kernel.Kernel5_18: + offset = 48 } return offset @@ -801,6 +817,8 @@ func getIoKcbCtxOffset(kv *kernel.Version) uint64 { return 80 case kv.IsUbuntuKernel() && kv.IsInRangeCloseOpen(kernel.Kernel5_4, kernel.Kernel5_5): return 96 + case kv.Code >= kernel.Kernel5_16: + return 88 default: return 
80 } diff --git a/pkg/security/probe/custom_events.go b/pkg/security/probe/custom_events.go index 5f8ed1bd3b872..b6614ae4b62a3 100644 --- a/pkg/security/probe/custom_events.go +++ b/pkg/security/probe/custom_events.go @@ -36,7 +36,7 @@ func NewEventLostReadEvent(mapName string, lost float64) (*rules.Rule, *events.C } evt.FillCustomEventCommonFields() - return events.NewCustomRule(events.LostEventsRuleID), events.NewCustomEvent(model.CustomLostReadEventType, evt) + return events.NewCustomRule(events.LostEventsRuleID, events.LostEventsRuleDesc), events.NewCustomEvent(model.CustomLostReadEventType, evt) } // EventLostWrite is the event used to report lost events detected from kernel space @@ -55,7 +55,7 @@ func NewEventLostWriteEvent(mapName string, perEventPerCPU map[string]uint64) (* } evt.FillCustomEventCommonFields() - return events.NewCustomRule(events.LostEventsRuleID), events.NewCustomEvent(model.CustomLostWriteEventType, evt) + return events.NewCustomRule(events.LostEventsRuleID, events.LostEventsRuleDesc), events.NewCustomEvent(model.CustomLostWriteEventType, evt) } // NoisyProcessEvent is used to report that a noisy process was temporarily discarded @@ -91,7 +91,7 @@ func NewNoisyProcessEvent(count uint64, // Overwrite common timestamp evt.Timestamp = timestamp - return events.NewCustomRule(events.NoisyProcessRuleID), events.NewCustomEvent(model.CustomNoisyProcessEventType, evt) + return events.NewCustomRule(events.NoisyProcessRuleID, events.NoisyProcessRuleDesc), events.NewCustomEvent(model.CustomNoisyProcessEventType, evt) } func errorToEventType(err error) model.EventType { @@ -112,7 +112,7 @@ type AbnormalEvent struct { } // NewAbnormalPathEvent returns the rule and a populated custom event for a abnormal_path event -func NewAbnormalEvent(id string, event *model.Event, probe *Probe, err error) (*rules.Rule, *events.CustomEvent) { +func NewAbnormalEvent(id string, description string, event *model.Event, probe *Probe, err error) (*rules.Rule, 
*events.CustomEvent) { marshalerCtor := func() easyjson.Marshaler { evt := AbnormalEvent{ Event: serializers.NewEventSerializer(event, probe.resolvers), @@ -125,5 +125,5 @@ func NewAbnormalEvent(id string, event *model.Event, probe *Probe, err error) (* return evt } - return events.NewCustomRule(id), events.NewCustomEventLazy(errorToEventType(err), marshalerCtor) + return events.NewCustomRule(id, description), events.NewCustomEventLazy(errorToEventType(err), marshalerCtor) } diff --git a/pkg/security/probe/field_handlers_linux.go b/pkg/security/probe/field_handlers_linux.go index f3f5a072a66e3..1f3baae02817b 100644 --- a/pkg/security/probe/field_handlers_linux.go +++ b/pkg/security/probe/field_handlers_linux.go @@ -183,7 +183,7 @@ func (fh *FieldHandlers) ResolveProcessCreatedAt(ev *model.Event, e *model.Proce // ResolveProcessArgv0 resolves the first arg of the event func (fh *FieldHandlers) ResolveProcessArgv0(ev *model.Event, process *model.Process) string { - arg0, _ := fh.resolvers.ProcessResolver.GetProcessArgv0(process) + arg0, _ := sprocess.GetProcessArgv0(process) return arg0 } @@ -292,28 +292,28 @@ func (fh *FieldHandlers) ResolveSELinuxBoolName(ev *model.Event, e *model.SELinu return e.BoolName } +// GetProcessCacheEntry queries the ProcessResolver to retrieve the ProcessContext of the event +func (fh *FieldHandlers) GetProcessCacheEntry(ev *model.Event) (*model.ProcessCacheEntry, bool) { + ev.ProcessCacheEntry = fh.resolvers.ProcessResolver.Resolve(ev.PIDContext.Pid, ev.PIDContext.Tid, ev.PIDContext.ExecInode, false) + if ev.ProcessCacheEntry == nil { + ev.ProcessCacheEntry = model.NewEmptyProcessCacheEntry(ev.PIDContext.Pid, ev.PIDContext.Tid, false) + return ev.ProcessCacheEntry, false + } + return ev.ProcessCacheEntry, true +} + // ResolveProcessCacheEntry queries the ProcessResolver to retrieve the ProcessContext of the event func (fh *FieldHandlers) ResolveProcessCacheEntry(ev *model.Event) (*model.ProcessCacheEntry, bool) { if 
ev.PIDContext.IsKworker { return model.NewEmptyProcessCacheEntry(ev.PIDContext.Pid, ev.PIDContext.Tid, true), false } - if ev.ProcessCacheEntry == nil { - ev.ProcessCacheEntry = fh.resolvers.ProcessResolver.Resolve(ev.PIDContext.Pid, ev.PIDContext.Tid, ev.PIDContext.Inode) + if ev.ProcessCacheEntry == nil && ev.PIDContext.Pid != 0 { + ev.ProcessCacheEntry = fh.resolvers.ProcessResolver.Resolve(ev.PIDContext.Pid, ev.PIDContext.Tid, ev.PIDContext.ExecInode, true) } if ev.ProcessCacheEntry == nil { - // keep the original PIDContext - ev.ProcessCacheEntry = model.NewProcessCacheEntry(nil) - ev.ProcessCacheEntry.PIDContext = ev.PIDContext - - ev.ProcessCacheEntry.FileEvent.SetPathnameStr("") - ev.ProcessCacheEntry.FileEvent.SetBasenameStr("") - - // mark interpreter as resolved too - ev.ProcessCacheEntry.LinuxBinprm.FileEvent.SetPathnameStr("") - ev.ProcessCacheEntry.LinuxBinprm.FileEvent.SetBasenameStr("") - + ev.ProcessCacheEntry = model.NewEmptyProcessCacheEntry(ev.PIDContext.Pid, ev.PIDContext.Tid, false) return ev.ProcessCacheEntry, false } diff --git a/pkg/security/probe/load_controller.go b/pkg/security/probe/load_controller.go index 55c92ddf99b6c..6a4b9072a13cc 100644 --- a/pkg/security/probe/load_controller.go +++ b/pkg/security/probe/load_controller.go @@ -151,7 +151,7 @@ func (lc *LoadController) discardNoisiestProcess() { lc.pidDiscardersCount.Inc() if lc.NoisyProcessCustomEventRate.Allow() { - process := lc.probe.resolvers.ProcessResolver.Resolve(maxKey.Pid, maxKey.Pid, 0) + process := lc.probe.resolvers.ProcessResolver.Resolve(maxKey.Pid, maxKey.Pid, 0, false) if process == nil { seclog.Warnf("Unable to resolve process with pid: %d", maxKey.Pid) return diff --git a/pkg/security/probe/probe_linux.go b/pkg/security/probe/probe_linux.go index b7fbad87e041f..2b06639c0d67c 100644 --- a/pkg/security/probe/probe_linux.go +++ b/pkg/security/probe/probe_linux.go @@ -332,7 +332,7 @@ func (p *Probe) sendAnomalyDetection(event *model.Event) { } p.DispatchCustomEvent( 
- events.NewCustomRule(events.AnomalyDetectionRuleID), + events.NewCustomRule(events.AnomalyDetectionRuleID, events.AnomalyDetectionRuleDesc), events.NewCustomEventLazy(event.GetEventType(), p.EventMarshallerCtor(event), tags...), ) p.anomalyDetectionSent[event.GetEventType()].Inc() @@ -721,7 +721,7 @@ func (p *Probe) handleEvent(CPU int, data []byte) { } if err = p.resolvers.ProcessResolver.ResolveNewProcessCacheEntry(event.ProcessCacheEntry, event.ContainerContext); err != nil { - seclog.Debugf("failed to resolve new process cache entry context: %s", err) + seclog.Debugf("failed to resolve new process cache entry context for pid %d: %s", event.PIDContext.Pid, err) var errResolution *path.ErrPathResolution if errors.As(err, &errResolution) { @@ -739,7 +739,7 @@ func (p *Probe) handleEvent(CPU int, data []byte) { } var exists bool - event.ProcessCacheEntry, exists = p.fieldHandlers.ResolveProcessCacheEntry(event) + event.ProcessCacheEntry, exists = p.fieldHandlers.GetProcessCacheEntry(event) if !exists { // no need to dispatch an exit event that don't have the corresponding cache entry return @@ -786,7 +786,7 @@ func (p *Probe) handleEvent(CPU int, data []byte) { // resolve tracee process context var pce *model.ProcessCacheEntry if event.PTrace.PID > 0 { // pid can be 0 for a PTRACE_TRACEME request - pce = p.resolvers.ProcessResolver.Resolve(event.PTrace.PID, event.PTrace.PID, 0) + pce = p.resolvers.ProcessResolver.Resolve(event.PTrace.PID, event.PTrace.PID, 0, false) } if pce == nil { pce = model.NewEmptyProcessCacheEntry(event.PTrace.PID, event.PTrace.PID, false) @@ -831,7 +831,7 @@ func (p *Probe) handleEvent(CPU int, data []byte) { // resolve target process context var pce *model.ProcessCacheEntry if event.Signal.PID > 0 { // Linux accepts a kill syscall with both negative and zero pid - pce = p.resolvers.ProcessResolver.Resolve(event.Signal.PID, event.Signal.PID, 0) + pce = p.resolvers.ProcessResolver.Resolve(event.Signal.PID, event.Signal.PID, 0, false) } if 
pce == nil { pce = model.NewEmptyProcessCacheEntry(event.Signal.PID, event.Signal.PID, false) @@ -1352,7 +1352,7 @@ func (p *Probe) handleNewMount(ev *model.Event, m *model.Mount) error { } // Insert new mount point in cache, passing it a copy of the mount that we got from the event - if err := p.resolvers.MountResolver.Insert(*m, ev.PIDContext.Pid, ev.FieldHandlers.ResolveContainerID(ev, ev.ContainerContext)); err != nil { + if err := p.resolvers.MountResolver.Insert(*m); err != nil { seclog.Errorf("failed to insert mount event: %v", err) return err } @@ -1835,3 +1835,7 @@ func (p *Probe) IsActivityDumpEnabled() bool { func (p *Probe) IsActivityDumpTagRulesEnabled() bool { return p.Config.RuntimeSecurity.ActivityDumpTagRulesEnabled } + +func (p *Probe) IsSecurityProfileEnabled() bool { + return p.Config.RuntimeSecurity.SecurityProfileEnabled +} diff --git a/pkg/security/probe/probe_monitor.go b/pkg/security/probe/probe_monitor.go index d5f3d50424efd..45b8b667f892d 100644 --- a/pkg/security/probe/probe_monitor.go +++ b/pkg/security/probe/probe_monitor.go @@ -102,6 +102,11 @@ func (m *Monitor) GetActivityDumpManager() *dump.ActivityDumpManager { return m.activityDumpManager } +// GetSecurityProfileManager returns the activity dump manager +func (m *Monitor) GetSecurityProfileManager() *profile.SecurityProfileManager { + return m.securityProfileManager +} + // Start triggers the goroutine of all the underlying controllers and monitors of the Monitor func (m *Monitor) Start(ctx context.Context, wg *sync.WaitGroup) error { delta := 1 @@ -207,7 +212,7 @@ func (m *Monitor) ProcessEvent(event *model.Event) { var pathErr *path.ErrPathResolution if errors.As(event.Error, &pathErr) { m.probe.DispatchCustomEvent( - NewAbnormalEvent(events.AbnormalPathRuleID, event, m.probe, pathErr.Err), + NewAbnormalEvent(events.AbnormalPathRuleID, events.AbnormalPathRuleDesc, event, m.probe, pathErr.Err), ) return } @@ -215,7 +220,7 @@ func (m *Monitor) ProcessEvent(event *model.Event) { 
var processContextErr *ErrNoProcessContext if errors.As(event.Error, &processContextErr) { m.probe.DispatchCustomEvent( - NewAbnormalEvent(events.NoProcessContextErrorRuleID, event, m.probe, event.Error), + NewAbnormalEvent(events.NoProcessContextErrorRuleID, events.NoProcessContextErrorRuleDesc, event, m.probe, event.Error), ) return } @@ -223,7 +228,7 @@ func (m *Monitor) ProcessEvent(event *model.Event) { var brokenLineageErr *ErrProcessBrokenLineage if errors.As(event.Error, &brokenLineageErr) { m.probe.DispatchCustomEvent( - NewAbnormalEvent(events.BrokenProcessLineageErrorRuleID, event, m.probe, event.Error), + NewAbnormalEvent(events.BrokenProcessLineageErrorRuleID, events.BrokenProcessLineageErrorRuleDesc, event, m.probe, event.Error), ) return } @@ -252,6 +257,19 @@ func (m *Monitor) ListActivityDumps(params *api.ActivityDumpListParams) (*api.Ac return m.activityDumpManager.ListActivityDumps(params) } +// ErrSecurityProfileManagerDisabled is returned when the security profile manager is disabled +var ErrSecurityProfileManagerDisabled = errors.New("SecurityProfileManager is disabled") + +// ListSecurityProfiles returns the list of security profiles +func (m *Monitor) ListSecurityProfiles(params *api.SecurityProfileListParams) (*api.SecurityProfileListMessage, error) { + if !m.probe.IsSecurityProfileEnabled() { + return &api.SecurityProfileListMessage{ + Error: ErrSecurityProfileManagerDisabled.Error(), + }, ErrSecurityProfileManagerDisabled + } + return m.securityProfileManager.ListSecurityProfiles(params) +} + // StopActivityDump stops an active activity dump func (m *Monitor) StopActivityDump(params *api.ActivityDumpStopParams) (*api.ActivityDumpStopMessage, error) { if !m.probe.IsActivityDumpEnabled() { @@ -275,3 +293,13 @@ func (m *Monitor) GenerateTranscoding(params *api.TranscodingRequestParams) (*ap func (m *Monitor) GetActivityDumpTracedEventTypes() []model.EventType { return m.probe.Config.RuntimeSecurity.ActivityDumpTracedEventTypes } + +// 
SaveSecurityProfile saves the requested security profile to disk +func (m *Monitor) SaveSecurityProfile(params *api.SecurityProfileSaveParams) (*api.SecurityProfileSaveMessage, error) { + if !m.probe.IsSecurityProfileEnabled() { + return &api.SecurityProfileSaveMessage{ + Error: ErrSecurityProfileManagerDisabled.Error(), + }, ErrSecurityProfileManagerDisabled + } + return m.securityProfileManager.SaveSecurityProfile(params) +} diff --git a/pkg/security/proto/api/api.pb.go b/pkg/security/proto/api/api.pb.go index 8c880abc8e8f3..8b678c67741a6 100644 --- a/pkg/security/proto/api/api.pb.go +++ b/pkg/security/proto/api/api.pb.go @@ -1127,7 +1127,7 @@ type ActivityDumpParams struct { unknownFields protoimpl.UnknownFields Comm string `protobuf:"bytes,1,opt,name=Comm,proto3" json:"Comm,omitempty"` - Timeout int32 `protobuf:"varint,2,opt,name=Timeout,proto3" json:"Timeout,omitempty"` + Timeout string `protobuf:"bytes,2,opt,name=Timeout,proto3" json:"Timeout,omitempty"` DifferentiateArgs bool `protobuf:"varint,4,opt,name=DifferentiateArgs,proto3" json:"DifferentiateArgs,omitempty"` Storage *StorageRequestParams `protobuf:"bytes,5,opt,name=Storage,proto3" json:"Storage,omitempty"` ContainerID string `protobuf:"bytes,6,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` @@ -1172,11 +1172,11 @@ func (x *ActivityDumpParams) GetComm() string { return "" } -func (x *ActivityDumpParams) GetTimeout() int32 { +func (x *ActivityDumpParams) GetTimeout() string { if x != nil { return x.Timeout } - return 0 + return "" } func (x *ActivityDumpParams) GetDifferentiateArgs() bool { @@ -1200,7 +1200,7 @@ func (x *ActivityDumpParams) GetContainerID() string { return "" } -type ActivityDumpMetadataMessage struct { +type MetadataMessage struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -1221,8 +1221,8 @@ type ActivityDumpMetadataMessage struct { Serialization string `protobuf:"bytes,14,opt,name=Serialization,proto3" 
json:"Serialization,omitempty"` } -func (x *ActivityDumpMetadataMessage) Reset() { - *x = ActivityDumpMetadataMessage{} +func (x *MetadataMessage) Reset() { + *x = MetadataMessage{} if protoimpl.UnsafeEnabled { mi := &file_pkg_security_proto_api_api_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1230,13 +1230,13 @@ func (x *ActivityDumpMetadataMessage) Reset() { } } -func (x *ActivityDumpMetadataMessage) String() string { +func (x *MetadataMessage) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ActivityDumpMetadataMessage) ProtoMessage() {} +func (*MetadataMessage) ProtoMessage() {} -func (x *ActivityDumpMetadataMessage) ProtoReflect() protoreflect.Message { +func (x *MetadataMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1248,103 +1248,103 @@ func (x *ActivityDumpMetadataMessage) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ActivityDumpMetadataMessage.ProtoReflect.Descriptor instead. -func (*ActivityDumpMetadataMessage) Descriptor() ([]byte, []int) { +// Deprecated: Use MetadataMessage.ProtoReflect.Descriptor instead. 
+func (*MetadataMessage) Descriptor() ([]byte, []int) { return file_pkg_security_proto_api_api_proto_rawDescGZIP(), []int{22} } -func (x *ActivityDumpMetadataMessage) GetAgentVersion() string { +func (x *MetadataMessage) GetAgentVersion() string { if x != nil { return x.AgentVersion } return "" } -func (x *ActivityDumpMetadataMessage) GetAgentCommit() string { +func (x *MetadataMessage) GetAgentCommit() string { if x != nil { return x.AgentCommit } return "" } -func (x *ActivityDumpMetadataMessage) GetKernelVersion() string { +func (x *MetadataMessage) GetKernelVersion() string { if x != nil { return x.KernelVersion } return "" } -func (x *ActivityDumpMetadataMessage) GetLinuxDistribution() string { +func (x *MetadataMessage) GetLinuxDistribution() string { if x != nil { return x.LinuxDistribution } return "" } -func (x *ActivityDumpMetadataMessage) GetArch() string { +func (x *MetadataMessage) GetArch() string { if x != nil { return x.Arch } return "" } -func (x *ActivityDumpMetadataMessage) GetName() string { +func (x *MetadataMessage) GetName() string { if x != nil { return x.Name } return "" } -func (x *ActivityDumpMetadataMessage) GetProtobufVersion() string { +func (x *MetadataMessage) GetProtobufVersion() string { if x != nil { return x.ProtobufVersion } return "" } -func (x *ActivityDumpMetadataMessage) GetDifferentiateArgs() bool { +func (x *MetadataMessage) GetDifferentiateArgs() bool { if x != nil { return x.DifferentiateArgs } return false } -func (x *ActivityDumpMetadataMessage) GetComm() string { +func (x *MetadataMessage) GetComm() string { if x != nil { return x.Comm } return "" } -func (x *ActivityDumpMetadataMessage) GetContainerID() string { +func (x *MetadataMessage) GetContainerID() string { if x != nil { return x.ContainerID } return "" } -func (x *ActivityDumpMetadataMessage) GetStart() string { +func (x *MetadataMessage) GetStart() string { if x != nil { return x.Start } return "" } -func (x *ActivityDumpMetadataMessage) GetTimeout() string 
{ +func (x *MetadataMessage) GetTimeout() string { if x != nil { return x.Timeout } return "" } -func (x *ActivityDumpMetadataMessage) GetSize() uint64 { +func (x *MetadataMessage) GetSize() uint64 { if x != nil { return x.Size } return 0 } -func (x *ActivityDumpMetadataMessage) GetSerialization() string { +func (x *MetadataMessage) GetSerialization() string { if x != nil { return x.Serialization } @@ -1427,14 +1427,14 @@ type ActivityDumpMessage struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` - Source string `protobuf:"bytes,2,opt,name=Source,proto3" json:"Source,omitempty"` - Service string `protobuf:"bytes,3,opt,name=Service,proto3" json:"Service,omitempty"` - Tags []string `protobuf:"bytes,4,rep,name=Tags,proto3" json:"Tags,omitempty"` - Storage []*StorageRequestMessage `protobuf:"bytes,5,rep,name=Storage,proto3" json:"Storage,omitempty"` - Metadata *ActivityDumpMetadataMessage `protobuf:"bytes,6,opt,name=Metadata,proto3" json:"Metadata,omitempty"` - DNSNames []string `protobuf:"bytes,8,rep,name=DNSNames,proto3" json:"DNSNames,omitempty"` - Error string `protobuf:"bytes,7,opt,name=Error,proto3" json:"Error,omitempty"` + Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` + Source string `protobuf:"bytes,2,opt,name=Source,proto3" json:"Source,omitempty"` + Service string `protobuf:"bytes,3,opt,name=Service,proto3" json:"Service,omitempty"` + Tags []string `protobuf:"bytes,4,rep,name=Tags,proto3" json:"Tags,omitempty"` + Storage []*StorageRequestMessage `protobuf:"bytes,5,rep,name=Storage,proto3" json:"Storage,omitempty"` + Metadata *MetadataMessage `protobuf:"bytes,6,opt,name=Metadata,proto3" json:"Metadata,omitempty"` + DNSNames []string `protobuf:"bytes,8,rep,name=DNSNames,proto3" json:"DNSNames,omitempty"` + Error string `protobuf:"bytes,7,opt,name=Error,proto3" json:"Error,omitempty"` } func (x *ActivityDumpMessage) Reset() { @@ 
-1504,7 +1504,7 @@ func (x *ActivityDumpMessage) GetStorage() []*StorageRequestMessage { return nil } -func (x *ActivityDumpMessage) GetMetadata() *ActivityDumpMetadataMessage { +func (x *ActivityDumpMessage) GetMetadata() *MetadataMessage { if x != nil { return x.Metadata } @@ -1931,6 +1931,597 @@ func (x *ActivityDumpStreamMessage) GetData() []byte { return nil } +type WorkloadSelectorMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Tag string `protobuf:"bytes,2,opt,name=Tag,proto3" json:"Tag,omitempty"` +} + +func (x *WorkloadSelectorMessage) Reset() { + *x = WorkloadSelectorMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkloadSelectorMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkloadSelectorMessage) ProtoMessage() {} + +func (x *WorkloadSelectorMessage) ProtoReflect() protoreflect.Message { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkloadSelectorMessage.ProtoReflect.Descriptor instead. 
+func (*WorkloadSelectorMessage) Descriptor() ([]byte, []int) { + return file_pkg_security_proto_api_api_proto_rawDescGZIP(), []int{33} +} + +func (x *WorkloadSelectorMessage) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *WorkloadSelectorMessage) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +type LastAnomalyTimestampMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EventType string `protobuf:"bytes,1,opt,name=EventType,proto3" json:"EventType,omitempty"` + Timestamp string `protobuf:"bytes,2,opt,name=Timestamp,proto3" json:"Timestamp,omitempty"` + IsStableEventType bool `protobuf:"varint,3,opt,name=IsStableEventType,proto3" json:"IsStableEventType,omitempty"` +} + +func (x *LastAnomalyTimestampMessage) Reset() { + *x = LastAnomalyTimestampMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LastAnomalyTimestampMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LastAnomalyTimestampMessage) ProtoMessage() {} + +func (x *LastAnomalyTimestampMessage) ProtoReflect() protoreflect.Message { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LastAnomalyTimestampMessage.ProtoReflect.Descriptor instead. 
+func (*LastAnomalyTimestampMessage) Descriptor() ([]byte, []int) { + return file_pkg_security_proto_api_api_proto_rawDescGZIP(), []int{34} +} + +func (x *LastAnomalyTimestampMessage) GetEventType() string { + if x != nil { + return x.EventType + } + return "" +} + +func (x *LastAnomalyTimestampMessage) GetTimestamp() string { + if x != nil { + return x.Timestamp + } + return "" +} + +func (x *LastAnomalyTimestampMessage) GetIsStableEventType() bool { + if x != nil { + return x.IsStableEventType + } + return false +} + +type InstanceMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` + Tags []string `protobuf:"bytes,2,rep,name=Tags,proto3" json:"Tags,omitempty"` +} + +func (x *InstanceMessage) Reset() { + *x = InstanceMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InstanceMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InstanceMessage) ProtoMessage() {} + +func (x *InstanceMessage) ProtoReflect() protoreflect.Message { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InstanceMessage.ProtoReflect.Descriptor instead. 
+func (*InstanceMessage) Descriptor() ([]byte, []int) { + return file_pkg_security_proto_api_api_proto_rawDescGZIP(), []int{35} +} + +func (x *InstanceMessage) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *InstanceMessage) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +type ActivityTreeStatsMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ProcessNodesCount int64 `protobuf:"varint,1,opt,name=ProcessNodesCount,proto3" json:"ProcessNodesCount,omitempty"` + FileNodesCount int64 `protobuf:"varint,2,opt,name=FileNodesCount,proto3" json:"FileNodesCount,omitempty"` + DNSNodesCount int64 `protobuf:"varint,3,opt,name=DNSNodesCount,proto3" json:"DNSNodesCount,omitempty"` + SocketNodesCount int64 `protobuf:"varint,4,opt,name=SocketNodesCount,proto3" json:"SocketNodesCount,omitempty"` + ApproximateSize int64 `protobuf:"varint,5,opt,name=ApproximateSize,proto3" json:"ApproximateSize,omitempty"` +} + +func (x *ActivityTreeStatsMessage) Reset() { + *x = ActivityTreeStatsMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ActivityTreeStatsMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityTreeStatsMessage) ProtoMessage() {} + +func (x *ActivityTreeStatsMessage) ProtoReflect() protoreflect.Message { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityTreeStatsMessage.ProtoReflect.Descriptor instead. 
+func (*ActivityTreeStatsMessage) Descriptor() ([]byte, []int) { + return file_pkg_security_proto_api_api_proto_rawDescGZIP(), []int{36} +} + +func (x *ActivityTreeStatsMessage) GetProcessNodesCount() int64 { + if x != nil { + return x.ProcessNodesCount + } + return 0 +} + +func (x *ActivityTreeStatsMessage) GetFileNodesCount() int64 { + if x != nil { + return x.FileNodesCount + } + return 0 +} + +func (x *ActivityTreeStatsMessage) GetDNSNodesCount() int64 { + if x != nil { + return x.DNSNodesCount + } + return 0 +} + +func (x *ActivityTreeStatsMessage) GetSocketNodesCount() int64 { + if x != nil { + return x.SocketNodesCount + } + return 0 +} + +func (x *ActivityTreeStatsMessage) GetApproximateSize() int64 { + if x != nil { + return x.ApproximateSize + } + return 0 +} + +type SecurityProfileMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LoadedInKernel bool `protobuf:"varint,1,opt,name=LoadedInKernel,proto3" json:"LoadedInKernel,omitempty"` + LoadedInKernelTimestamp string `protobuf:"bytes,2,opt,name=LoadedInKernelTimestamp,proto3" json:"LoadedInKernelTimestamp,omitempty"` + Selector *WorkloadSelectorMessage `protobuf:"bytes,3,opt,name=Selector,proto3" json:"Selector,omitempty"` + ProfileCookie uint64 `protobuf:"varint,4,opt,name=ProfileCookie,proto3" json:"ProfileCookie,omitempty"` + AnomalyDetectionEvents []string `protobuf:"bytes,5,rep,name=AnomalyDetectionEvents,proto3" json:"AnomalyDetectionEvents,omitempty"` + LastAnomalies []*LastAnomalyTimestampMessage `protobuf:"bytes,6,rep,name=LastAnomalies,proto3" json:"LastAnomalies,omitempty"` + Instances []*InstanceMessage `protobuf:"bytes,7,rep,name=Instances,proto3" json:"Instances,omitempty"` + Status string `protobuf:"bytes,8,opt,name=Status,proto3" json:"Status,omitempty"` + Version string `protobuf:"bytes,9,opt,name=Version,proto3" json:"Version,omitempty"` + Metadata *MetadataMessage `protobuf:"bytes,10,opt,name=Metadata,proto3" 
json:"Metadata,omitempty"` + Tags []string `protobuf:"bytes,11,rep,name=Tags,proto3" json:"Tags,omitempty"` + Stats *ActivityTreeStatsMessage `protobuf:"bytes,12,opt,name=Stats,proto3" json:"Stats,omitempty"` +} + +func (x *SecurityProfileMessage) Reset() { + *x = SecurityProfileMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityProfileMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityProfileMessage) ProtoMessage() {} + +func (x *SecurityProfileMessage) ProtoReflect() protoreflect.Message { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityProfileMessage.ProtoReflect.Descriptor instead. 
+func (*SecurityProfileMessage) Descriptor() ([]byte, []int) { + return file_pkg_security_proto_api_api_proto_rawDescGZIP(), []int{37} +} + +func (x *SecurityProfileMessage) GetLoadedInKernel() bool { + if x != nil { + return x.LoadedInKernel + } + return false +} + +func (x *SecurityProfileMessage) GetLoadedInKernelTimestamp() string { + if x != nil { + return x.LoadedInKernelTimestamp + } + return "" +} + +func (x *SecurityProfileMessage) GetSelector() *WorkloadSelectorMessage { + if x != nil { + return x.Selector + } + return nil +} + +func (x *SecurityProfileMessage) GetProfileCookie() uint64 { + if x != nil { + return x.ProfileCookie + } + return 0 +} + +func (x *SecurityProfileMessage) GetAnomalyDetectionEvents() []string { + if x != nil { + return x.AnomalyDetectionEvents + } + return nil +} + +func (x *SecurityProfileMessage) GetLastAnomalies() []*LastAnomalyTimestampMessage { + if x != nil { + return x.LastAnomalies + } + return nil +} + +func (x *SecurityProfileMessage) GetInstances() []*InstanceMessage { + if x != nil { + return x.Instances + } + return nil +} + +func (x *SecurityProfileMessage) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *SecurityProfileMessage) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *SecurityProfileMessage) GetMetadata() *MetadataMessage { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *SecurityProfileMessage) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *SecurityProfileMessage) GetStats() *ActivityTreeStatsMessage { + if x != nil { + return x.Stats + } + return nil +} + +type SecurityProfileListParams struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IncludeCache bool `protobuf:"varint,1,opt,name=IncludeCache,proto3" json:"IncludeCache,omitempty"` +} + +func (x *SecurityProfileListParams) Reset() { + *x = 
SecurityProfileListParams{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityProfileListParams) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityProfileListParams) ProtoMessage() {} + +func (x *SecurityProfileListParams) ProtoReflect() protoreflect.Message { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityProfileListParams.ProtoReflect.Descriptor instead. +func (*SecurityProfileListParams) Descriptor() ([]byte, []int) { + return file_pkg_security_proto_api_api_proto_rawDescGZIP(), []int{38} +} + +func (x *SecurityProfileListParams) GetIncludeCache() bool { + if x != nil { + return x.IncludeCache + } + return false +} + +type SecurityProfileListMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Profiles []*SecurityProfileMessage `protobuf:"bytes,1,rep,name=Profiles,proto3" json:"Profiles,omitempty"` + Error string `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` +} + +func (x *SecurityProfileListMessage) Reset() { + *x = SecurityProfileListMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityProfileListMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityProfileListMessage) ProtoMessage() {} + +func (x *SecurityProfileListMessage) ProtoReflect() protoreflect.Message { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != 
nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityProfileListMessage.ProtoReflect.Descriptor instead. +func (*SecurityProfileListMessage) Descriptor() ([]byte, []int) { + return file_pkg_security_proto_api_api_proto_rawDescGZIP(), []int{39} +} + +func (x *SecurityProfileListMessage) GetProfiles() []*SecurityProfileMessage { + if x != nil { + return x.Profiles + } + return nil +} + +func (x *SecurityProfileListMessage) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type SecurityProfileSaveParams struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Selector *WorkloadSelectorMessage `protobuf:"bytes,1,opt,name=Selector,proto3" json:"Selector,omitempty"` +} + +func (x *SecurityProfileSaveParams) Reset() { + *x = SecurityProfileSaveParams{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityProfileSaveParams) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityProfileSaveParams) ProtoMessage() {} + +func (x *SecurityProfileSaveParams) ProtoReflect() protoreflect.Message { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityProfileSaveParams.ProtoReflect.Descriptor instead. 
+func (*SecurityProfileSaveParams) Descriptor() ([]byte, []int) { + return file_pkg_security_proto_api_api_proto_rawDescGZIP(), []int{40} +} + +func (x *SecurityProfileSaveParams) GetSelector() *WorkloadSelectorMessage { + if x != nil { + return x.Selector + } + return nil +} + +type SecurityProfileSaveMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` + File string `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"` +} + +func (x *SecurityProfileSaveMessage) Reset() { + *x = SecurityProfileSaveMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityProfileSaveMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityProfileSaveMessage) ProtoMessage() {} + +func (x *SecurityProfileSaveMessage) ProtoReflect() protoreflect.Message { + mi := &file_pkg_security_proto_api_api_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityProfileSaveMessage.ProtoReflect.Descriptor instead. 
+func (*SecurityProfileSaveMessage) Descriptor() ([]byte, []int) { + return file_pkg_security_proto_api_api_proto_rawDescGZIP(), []int{41} +} + +func (x *SecurityProfileSaveMessage) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +func (x *SecurityProfileSaveMessage) GetFile() string { + if x != nil { + return x.File + } + return "" +} + var File_pkg_security_proto_api_api_proto protoreflect.FileDescriptor var file_pkg_security_proto_api_api_proto_rawDesc = []byte{ @@ -2053,7 +2644,7 @@ var file_pkg_security_proto_api_api_proto_rawDesc = []byte{ 0x6e, 0x22, 0xc7, 0x01, 0x0a, 0x12, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x43, 0x6f, 0x6d, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x43, 0x6f, 0x6d, 0x6d, 0x12, 0x18, 0x0a, 0x07, - 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x54, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x44, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, @@ -2062,167 +2653,266 @@ var file_pkg_security_proto_api_api_proto_rawDesc = []byte{ 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x22, 0xd7, 0x03, 0x0a, 0x1b, - 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x22, 
0x0a, 0x0c, 0x41, - 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x20, 0x0a, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, - 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x11, 0x4c, 0x69, 0x6e, 0x75, 0x78, - 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x11, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x41, 0x72, 0x63, 0x68, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x41, 0x72, 0x63, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, - 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x69, 0x66, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x11, 0x44, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, - 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x43, 0x6f, 0x6d, 0x6d, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x43, 0x6f, 0x6d, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x43, 0x6f, 0x6e, 
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x12, 0x18, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x53, - 0x69, 0x7a, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x79, 0x0a, 0x15, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, - 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0b, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, - 0x46, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x46, 0x69, 0x6c, 0x65, - 0x22, 0x95, 0x02, 0x0a, 0x13, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, - 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x6f, 0x73, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 
0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x54, 0x61, - 0x67, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, - 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x3c, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x4e, 0x53, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x44, 0x4e, 0x53, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x18, 0x0a, 0x16, 0x41, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x22, 0x5f, 0x0a, 0x17, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, - 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, - 0x05, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x12, 0x14, 0x0a, - 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, - 0x72, 0x6f, 0x72, 0x22, 0x62, 0x0a, 0x16, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 
0x44, - 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x12, 0x0a, - 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x43, 0x6f, 0x6d, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x43, 0x6f, 0x6d, 0x6d, 0x22, 0x2f, 0x0a, 0x17, 0x41, 0x63, 0x74, 0x69, 0x76, - 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x7b, 0x0a, 0x18, 0x54, 0x72, 0x61, 0x6e, - 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x44, 0x75, 0x6d, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x22, 0xcb, 0x03, 0x0a, 0x0f, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x22, 0x0a, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x4b, 0x65, + 0x72, 0x6e, 0x65, 0x6c, 0x56, 
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x11, 0x4c, + 0x69, 0x6e, 0x75, 0x78, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x44, 0x69, 0x73, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x41, 0x72, 0x63, + 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x41, 0x72, 0x63, 0x68, 0x12, 0x12, 0x0a, + 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x11, 0x44, + 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x44, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x43, 0x6f, 0x6d, + 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x43, 0x6f, 0x6d, 0x6d, 0x12, 0x20, 0x0a, + 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, + 0x14, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x0d, 0x53, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x79, 0x0a, 0x15, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x20, + 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0b, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x46, 0x69, 0x6c, 0x65, 0x22, 0x89, 0x02, 0x0a, 0x13, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, + 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x48, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x6f, 0x73, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x52, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x08, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 
0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, + 0x0a, 0x08, 0x44, 0x4e, 0x53, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x08, 0x44, 0x4e, 0x53, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x22, 0x18, 0x0a, 0x16, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, + 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x5f, 0x0a, 0x17, 0x41, 0x63, + 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, + 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, + 0x44, 0x75, 0x6d, 0x70, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x62, 0x0a, 0x16, 0x41, + 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, 0x70, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x43, + 0x6f, 0x6d, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x43, 0x6f, 0x6d, 0x6d, 0x22, + 0x2f, 0x0a, 0x17, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, + 0x74, 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73, 
0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x22, 0x7b, 0x0a, 0x18, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x46, 0x69, 0x6c, 0x65, - 0x12, 0x33, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0x67, 0x0a, 0x19, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, + 0x44, 0x75, 0x6d, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0x67, 0x0a, + 0x19, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x12, 0x34, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x41, 0x63, 0x74, 0x69, 
0x76, 0x69, + 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x22, 0x5d, 0x0a, 0x19, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, + 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x2c, 0x0a, 0x04, 0x44, 0x75, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x04, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x12, 0x0a, + 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, + 0x61, 0x22, 0x3f, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x54, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x54, + 0x61, 0x67, 0x22, 0x87, 0x01, 0x0a, 0x1b, 0x4c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, + 0x6c, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2c, + 0x0a, 0x11, 0x49, 0x73, 0x53, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x49, 0x73, 0x53, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x47, 0x0a, 0x0f, + 0x49, 0x6e, 0x73, 0x74, 
0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x44, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x04, 0x54, 0x61, 0x67, 0x73, 0x22, 0xec, 0x01, 0x0a, 0x18, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, + 0x74, 0x79, 0x54, 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x4e, 0x6f, 0x64, + 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x50, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x26, 0x0a, 0x0e, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x6f, + 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x44, 0x4e, 0x53, 0x4e, + 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0d, 0x44, 0x4e, 0x53, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, + 0x0a, 0x10, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x41, 0x70, + 0x70, 0x72, 0x6f, 0x78, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0f, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x78, 0x69, 0x6d, 0x61, 0x74, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x22, 0xbb, 0x04, 0x0a, 0x16, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 
0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x26, 0x0a, 0x0e, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x49, 0x6e, 0x4b, 0x65, 0x72, 0x6e, 0x65, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x49, + 0x6e, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x12, 0x38, 0x0a, 0x17, 0x4c, 0x6f, 0x61, 0x64, 0x65, + 0x64, 0x49, 0x6e, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, + 0x49, 0x6e, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x12, 0x38, 0x0a, 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, + 0x61, 0x64, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x52, 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x50, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0d, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6f, 0x6b, 0x69, + 0x65, 0x12, 0x36, 0x0a, 0x16, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x79, 0x44, 0x65, 0x74, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x16, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x79, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x46, 0x0a, 0x0d, 0x4c, 0x61, 0x73, + 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, + 0x6c, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x52, 0x0d, 0x4c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x69, 0x65, + 
0x73, 0x12, 0x32, 0x0a, 0x09, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x09, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, + 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, + 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, + 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x33, 0x0a, + 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x72, 0x65, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x22, 0x3f, 0x0a, 0x19, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x22, 0x0a, 0x0c, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x22, 0x6b, 0x0a, 0x1a, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x37, 0x0a, 0x08, 0x50, 
0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x52, 0x08, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x22, 0x55, 0x0a, 0x19, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x53, 0x61, 0x76, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x38, 0x0a, + 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x46, 0x0a, 0x1a, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x61, 0x76, 0x65, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x46, + 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x32, + 0xb3, 0x09, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4d, 0x6f, 0x64, 0x75, + 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, + 0x13, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x00, 0x30, 0x01, 0x12, 0x57, 0x0a, 0x10, 0x44, 0x75, 0x6d, 0x70, 0x50, 
0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, + 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x24, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x09, + 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, + 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x30, 0x0a, + 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x1a, 0x0b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, + 0x4b, 0x0a, 0x0b, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x12, 0x16, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x22, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0e, + 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x19, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x52, 0x65, 
0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, + 0x14, 0x44, 0x75, 0x6d, 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, + 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, + 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0e, 0x44, 0x75, + 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x19, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x65, 0x72, + 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, + 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0c, 0x44, 0x75, 0x6d, 0x70, 0x41, 0x63, 0x74, + 0x69, 0x76, 0x69, 0x74, 0x79, 0x12, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, + 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x18, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, + 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x50, 0x0a, 0x11, 0x4c, 0x69, + 0x73, 0x74, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x12, + 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, + 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1c, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 
0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4c, + 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x10, + 0x53, 0x74, 0x6f, 0x70, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, + 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, + 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1c, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, + 0x53, 0x74, 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, + 0x12, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x1a, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x34, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0x1a, - 0x0a, 0x18, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x5d, 0x0a, 0x19, 0x41, 0x63, - 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x04, 0x44, 0x75, 0x6d, 0x70, 
0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, - 0x04, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x32, 0xfe, 0x07, 0x0a, 0x0e, 0x53, 0x65, - 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x09, - 0x47, 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x13, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x47, 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x19, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x57, 0x0a, - 0x10, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x61, 0x63, 0x68, - 0x65, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x63, - 0x65, 0x73, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x24, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, - 0x70, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x30, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x50, 
0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x0b, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x52, 0x75, 0x6e, - 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, - 0x75, 0x6e, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x1a, 0x22, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, - 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, - 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x1a, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x14, 0x44, 0x75, 0x6d, 0x70, 0x4e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x1f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x1a, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0e, 0x44, 0x75, 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, - 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, - 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x1a, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 
0x70, 0x44, 0x69, 0x73, 0x63, - 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, - 0x43, 0x0a, 0x0c, 0x44, 0x75, 0x6d, 0x70, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x12, - 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, - 0x6d, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, - 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x00, 0x12, 0x50, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, - 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x70, 0x41, 0x63, - 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, - 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, - 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, 0x70, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x12, 0x54, 0x72, 0x61, 0x6e, 0x73, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1e, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, - 0x71, 
0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x5a, - 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, - 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, - 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, - 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x42, 0x18, 0x5a, 0x16, 0x70, 0x6b, - 0x67, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x67, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x63, 0x74, 0x69, 0x76, + 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1d, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1e, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, + 0x12, 0x59, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, + 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 
0x12, 0x58, 0x0a, 0x13, 0x53, + 0x61, 0x76, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x12, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x61, 0x76, 0x65, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x1a, 0x1f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x61, 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x22, 0x00, 0x42, 0x18, 0x5a, 0x16, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2237,7 +2927,7 @@ func file_pkg_security_proto_api_api_proto_rawDescGZIP() []byte { return file_pkg_security_proto_api_api_proto_rawDescData } -var file_pkg_security_proto_api_api_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_pkg_security_proto_api_api_proto_msgTypes = make([]protoimpl.MessageInfo, 42) var file_pkg_security_proto_api_api_proto_goTypes = []interface{}{ (*GetEventParams)(nil), // 0: api.GetEventParams (*SecurityEventMessage)(nil), // 1: api.SecurityEventMessage @@ -2261,7 +2951,7 @@ var file_pkg_security_proto_api_api_proto_goTypes = []interface{}{ (*DumpDiscardersMessage)(nil), // 19: api.DumpDiscardersMessage (*StorageRequestParams)(nil), // 20: api.StorageRequestParams (*ActivityDumpParams)(nil), // 21: api.ActivityDumpParams - (*ActivityDumpMetadataMessage)(nil), // 22: api.ActivityDumpMetadataMessage + (*MetadataMessage)(nil), // 22: api.MetadataMessage (*StorageRequestMessage)(nil), // 23: api.StorageRequestMessage (*ActivityDumpMessage)(nil), // 24: api.ActivityDumpMessage (*ActivityDumpListParams)(nil), // 25: api.ActivityDumpListParams @@ -2272,6 +2962,15 @@ var file_pkg_security_proto_api_api_proto_goTypes = []interface{}{ (*TranscodingRequestMessage)(nil), // 30: 
api.TranscodingRequestMessage (*ActivityDumpStreamParams)(nil), // 31: api.ActivityDumpStreamParams (*ActivityDumpStreamMessage)(nil), // 32: api.ActivityDumpStreamMessage + (*WorkloadSelectorMessage)(nil), // 33: api.WorkloadSelectorMessage + (*LastAnomalyTimestampMessage)(nil), // 34: api.LastAnomalyTimestampMessage + (*InstanceMessage)(nil), // 35: api.InstanceMessage + (*ActivityTreeStatsMessage)(nil), // 36: api.ActivityTreeStatsMessage + (*SecurityProfileMessage)(nil), // 37: api.SecurityProfileMessage + (*SecurityProfileListParams)(nil), // 38: api.SecurityProfileListParams + (*SecurityProfileListMessage)(nil), // 39: api.SecurityProfileListMessage + (*SecurityProfileSaveParams)(nil), // 40: api.SecurityProfileSaveParams + (*SecurityProfileSaveMessage)(nil), // 41: api.SecurityProfileSaveMessage } var file_pkg_security_proto_api_api_proto_depIdxs = []int32{ 17, // 0: api.Status.Environment:type_name -> api.EnvironmentStatus @@ -2280,42 +2979,53 @@ var file_pkg_security_proto_api_api_proto_depIdxs = []int32{ 16, // 3: api.EnvironmentStatus.Constants:type_name -> api.ConstantFetcherStatus 20, // 4: api.ActivityDumpParams.Storage:type_name -> api.StorageRequestParams 23, // 5: api.ActivityDumpMessage.Storage:type_name -> api.StorageRequestMessage - 22, // 6: api.ActivityDumpMessage.Metadata:type_name -> api.ActivityDumpMetadataMessage + 22, // 6: api.ActivityDumpMessage.Metadata:type_name -> api.MetadataMessage 24, // 7: api.ActivityDumpListMessage.Dumps:type_name -> api.ActivityDumpMessage 20, // 8: api.TranscodingRequestParams.Storage:type_name -> api.StorageRequestParams 23, // 9: api.TranscodingRequestMessage.Storage:type_name -> api.StorageRequestMessage 24, // 10: api.ActivityDumpStreamMessage.Dump:type_name -> api.ActivityDumpMessage - 0, // 11: api.SecurityModule.GetEvents:input_type -> api.GetEventParams - 2, // 12: api.SecurityModule.DumpProcessCache:input_type -> api.DumpProcessCacheParams - 6, // 13: api.SecurityModule.GetConfig:input_type -> 
api.GetConfigParams - 12, // 14: api.SecurityModule.GetStatus:input_type -> api.GetStatusParams - 8, // 15: api.SecurityModule.RunSelfTest:input_type -> api.RunSelfTestParams - 9, // 16: api.SecurityModule.ReloadPolicies:input_type -> api.ReloadPoliciesParams - 4, // 17: api.SecurityModule.DumpNetworkNamespace:input_type -> api.DumpNetworkNamespaceParams - 18, // 18: api.SecurityModule.DumpDiscarders:input_type -> api.DumpDiscardersParams - 21, // 19: api.SecurityModule.DumpActivity:input_type -> api.ActivityDumpParams - 25, // 20: api.SecurityModule.ListActivityDumps:input_type -> api.ActivityDumpListParams - 27, // 21: api.SecurityModule.StopActivityDump:input_type -> api.ActivityDumpStopParams - 29, // 22: api.SecurityModule.TranscodingRequest:input_type -> api.TranscodingRequestParams - 31, // 23: api.SecurityModule.GetActivityDumpStream:input_type -> api.ActivityDumpStreamParams - 1, // 24: api.SecurityModule.GetEvents:output_type -> api.SecurityEventMessage - 3, // 25: api.SecurityModule.DumpProcessCache:output_type -> api.SecurityDumpProcessCacheMessage - 7, // 26: api.SecurityModule.GetConfig:output_type -> api.SecurityConfigMessage - 15, // 27: api.SecurityModule.GetStatus:output_type -> api.Status - 11, // 28: api.SecurityModule.RunSelfTest:output_type -> api.SecuritySelfTestResultMessage - 10, // 29: api.SecurityModule.ReloadPolicies:output_type -> api.ReloadPoliciesResultMessage - 5, // 30: api.SecurityModule.DumpNetworkNamespace:output_type -> api.DumpNetworkNamespaceMessage - 19, // 31: api.SecurityModule.DumpDiscarders:output_type -> api.DumpDiscardersMessage - 24, // 32: api.SecurityModule.DumpActivity:output_type -> api.ActivityDumpMessage - 26, // 33: api.SecurityModule.ListActivityDumps:output_type -> api.ActivityDumpListMessage - 28, // 34: api.SecurityModule.StopActivityDump:output_type -> api.ActivityDumpStopMessage - 30, // 35: api.SecurityModule.TranscodingRequest:output_type -> api.TranscodingRequestMessage - 32, // 36: 
api.SecurityModule.GetActivityDumpStream:output_type -> api.ActivityDumpStreamMessage - 24, // [24:37] is the sub-list for method output_type - 11, // [11:24] is the sub-list for method input_type - 11, // [11:11] is the sub-list for extension type_name - 11, // [11:11] is the sub-list for extension extendee - 0, // [0:11] is the sub-list for field type_name + 33, // 11: api.SecurityProfileMessage.Selector:type_name -> api.WorkloadSelectorMessage + 34, // 12: api.SecurityProfileMessage.LastAnomalies:type_name -> api.LastAnomalyTimestampMessage + 35, // 13: api.SecurityProfileMessage.Instances:type_name -> api.InstanceMessage + 22, // 14: api.SecurityProfileMessage.Metadata:type_name -> api.MetadataMessage + 36, // 15: api.SecurityProfileMessage.Stats:type_name -> api.ActivityTreeStatsMessage + 37, // 16: api.SecurityProfileListMessage.Profiles:type_name -> api.SecurityProfileMessage + 33, // 17: api.SecurityProfileSaveParams.Selector:type_name -> api.WorkloadSelectorMessage + 0, // 18: api.SecurityModule.GetEvents:input_type -> api.GetEventParams + 2, // 19: api.SecurityModule.DumpProcessCache:input_type -> api.DumpProcessCacheParams + 6, // 20: api.SecurityModule.GetConfig:input_type -> api.GetConfigParams + 12, // 21: api.SecurityModule.GetStatus:input_type -> api.GetStatusParams + 8, // 22: api.SecurityModule.RunSelfTest:input_type -> api.RunSelfTestParams + 9, // 23: api.SecurityModule.ReloadPolicies:input_type -> api.ReloadPoliciesParams + 4, // 24: api.SecurityModule.DumpNetworkNamespace:input_type -> api.DumpNetworkNamespaceParams + 18, // 25: api.SecurityModule.DumpDiscarders:input_type -> api.DumpDiscardersParams + 21, // 26: api.SecurityModule.DumpActivity:input_type -> api.ActivityDumpParams + 25, // 27: api.SecurityModule.ListActivityDumps:input_type -> api.ActivityDumpListParams + 27, // 28: api.SecurityModule.StopActivityDump:input_type -> api.ActivityDumpStopParams + 29, // 29: api.SecurityModule.TranscodingRequest:input_type -> 
api.TranscodingRequestParams + 31, // 30: api.SecurityModule.GetActivityDumpStream:input_type -> api.ActivityDumpStreamParams + 38, // 31: api.SecurityModule.ListSecurityProfiles:input_type -> api.SecurityProfileListParams + 40, // 32: api.SecurityModule.SaveSecurityProfile:input_type -> api.SecurityProfileSaveParams + 1, // 33: api.SecurityModule.GetEvents:output_type -> api.SecurityEventMessage + 3, // 34: api.SecurityModule.DumpProcessCache:output_type -> api.SecurityDumpProcessCacheMessage + 7, // 35: api.SecurityModule.GetConfig:output_type -> api.SecurityConfigMessage + 15, // 36: api.SecurityModule.GetStatus:output_type -> api.Status + 11, // 37: api.SecurityModule.RunSelfTest:output_type -> api.SecuritySelfTestResultMessage + 10, // 38: api.SecurityModule.ReloadPolicies:output_type -> api.ReloadPoliciesResultMessage + 5, // 39: api.SecurityModule.DumpNetworkNamespace:output_type -> api.DumpNetworkNamespaceMessage + 19, // 40: api.SecurityModule.DumpDiscarders:output_type -> api.DumpDiscardersMessage + 24, // 41: api.SecurityModule.DumpActivity:output_type -> api.ActivityDumpMessage + 26, // 42: api.SecurityModule.ListActivityDumps:output_type -> api.ActivityDumpListMessage + 28, // 43: api.SecurityModule.StopActivityDump:output_type -> api.ActivityDumpStopMessage + 30, // 44: api.SecurityModule.TranscodingRequest:output_type -> api.TranscodingRequestMessage + 32, // 45: api.SecurityModule.GetActivityDumpStream:output_type -> api.ActivityDumpStreamMessage + 39, // 46: api.SecurityModule.ListSecurityProfiles:output_type -> api.SecurityProfileListMessage + 41, // 47: api.SecurityModule.SaveSecurityProfile:output_type -> api.SecurityProfileSaveMessage + 33, // [33:48] is the sub-list for method output_type + 18, // [18:33] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name } func init() { 
file_pkg_security_proto_api_api_proto_init() } @@ -2589,7 +3299,7 @@ func file_pkg_security_proto_api_api_proto_init() { } } file_pkg_security_proto_api_api_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ActivityDumpMetadataMessage); i { + switch v := v.(*MetadataMessage); i { case 0: return &v.state case 1: @@ -2720,6 +3430,114 @@ func file_pkg_security_proto_api_api_proto_init() { return nil } } + file_pkg_security_proto_api_api_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkloadSelectorMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_security_proto_api_api_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LastAnomalyTimestampMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_security_proto_api_api_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InstanceMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_security_proto_api_api_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ActivityTreeStatsMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_security_proto_api_api_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityProfileMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_security_proto_api_api_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityProfileListParams); i { 
+ case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_security_proto_api_api_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityProfileListMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_security_proto_api_api_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityProfileSaveParams); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_security_proto_api_api_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityProfileSaveMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -2727,7 +3545,7 @@ func file_pkg_security_proto_api_api_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_pkg_security_proto_api_api_proto_rawDesc, NumEnums: 0, - NumMessages: 33, + NumMessages: 42, NumExtensions: 0, NumServices: 1, }, diff --git a/pkg/security/proto/api/api.proto b/pkg/security/proto/api/api.proto index c8b08b5e1dbf5..821d2ccd6742a 100644 --- a/pkg/security/proto/api/api.proto +++ b/pkg/security/proto/api/api.proto @@ -101,13 +101,13 @@ message StorageRequestParams { message ActivityDumpParams { string Comm = 1; - int32 Timeout = 2; + string Timeout = 2; bool DifferentiateArgs = 4; StorageRequestParams Storage = 5; string ContainerID = 6; } -message ActivityDumpMetadataMessage { +message MetadataMessage { string AgentVersion = 1; string AgentCommit = 2; string KernelVersion = 3; @@ -138,7 +138,7 @@ message ActivityDumpMessage { string Service = 3; repeated string Tags = 4; repeated StorageRequestMessage 
Storage = 5; - ActivityDumpMetadataMessage Metadata = 6; + MetadataMessage Metadata = 6; repeated string DNSNames = 8; string Error = 7; } @@ -177,6 +177,63 @@ message ActivityDumpStreamMessage { bytes Data = 3; } +message WorkloadSelectorMessage { + string Name = 1; + string Tag = 2; +} + +message LastAnomalyTimestampMessage { + string EventType = 1; + string Timestamp = 2; + bool IsStableEventType = 3; +} + +message InstanceMessage { + string ContainerID = 1; + repeated string Tags = 2; +} + +message ActivityTreeStatsMessage { + int64 ProcessNodesCount = 1; + int64 FileNodesCount = 2; + int64 DNSNodesCount = 3; + int64 SocketNodesCount = 4; + int64 ApproximateSize = 5; +} + +message SecurityProfileMessage { + bool LoadedInKernel = 1; + string LoadedInKernelTimestamp = 2; + WorkloadSelectorMessage Selector = 3; + uint64 ProfileCookie = 4; + repeated string AnomalyDetectionEvents = 5; + repeated LastAnomalyTimestampMessage LastAnomalies = 6; + repeated InstanceMessage Instances = 7; + string Status = 8; + string Version = 9; + MetadataMessage Metadata = 10; + repeated string Tags = 11; + ActivityTreeStatsMessage Stats = 12; +} + +message SecurityProfileListParams { + bool IncludeCache = 1; +} + +message SecurityProfileListMessage { + repeated SecurityProfileMessage Profiles = 1; + string Error = 2; +} + +message SecurityProfileSaveParams { + WorkloadSelectorMessage Selector = 1; +} + +message SecurityProfileSaveMessage { + string Error = 1; + string File = 2; +} + service SecurityModule { rpc GetEvents(GetEventParams) returns (stream SecurityEventMessage) {} rpc DumpProcessCache(DumpProcessCacheParams) returns (SecurityDumpProcessCacheMessage) {} @@ -193,4 +250,8 @@ service SecurityModule { rpc StopActivityDump(ActivityDumpStopParams) returns (ActivityDumpStopMessage) {} rpc TranscodingRequest(TranscodingRequestParams) returns (TranscodingRequestMessage) {} rpc GetActivityDumpStream(ActivityDumpStreamParams) returns (stream ActivityDumpStreamMessage) {} + + // 
Security Profiles + rpc ListSecurityProfiles(SecurityProfileListParams) returns (SecurityProfileListMessage) {} + rpc SaveSecurityProfile(SecurityProfileSaveParams) returns (SecurityProfileSaveMessage) {} } diff --git a/pkg/security/proto/api/api_grpc.pb.go b/pkg/security/proto/api/api_grpc.pb.go index ed3099a3b3a5c..e1592289d3277 100644 --- a/pkg/security/proto/api/api_grpc.pb.go +++ b/pkg/security/proto/api/api_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.12.4 +// - protoc v3.6.1 // source: pkg/security/proto/api/api.proto package api @@ -36,6 +36,9 @@ type SecurityModuleClient interface { StopActivityDump(ctx context.Context, in *ActivityDumpStopParams, opts ...grpc.CallOption) (*ActivityDumpStopMessage, error) TranscodingRequest(ctx context.Context, in *TranscodingRequestParams, opts ...grpc.CallOption) (*TranscodingRequestMessage, error) GetActivityDumpStream(ctx context.Context, in *ActivityDumpStreamParams, opts ...grpc.CallOption) (SecurityModule_GetActivityDumpStreamClient, error) + // Security Profiles + ListSecurityProfiles(ctx context.Context, in *SecurityProfileListParams, opts ...grpc.CallOption) (*SecurityProfileListMessage, error) + SaveSecurityProfile(ctx context.Context, in *SecurityProfileSaveParams, opts ...grpc.CallOption) (*SecurityProfileSaveMessage, error) } type securityModuleClient struct { @@ -209,6 +212,24 @@ func (x *securityModuleGetActivityDumpStreamClient) Recv() (*ActivityDumpStreamM return m, nil } +func (c *securityModuleClient) ListSecurityProfiles(ctx context.Context, in *SecurityProfileListParams, opts ...grpc.CallOption) (*SecurityProfileListMessage, error) { + out := new(SecurityProfileListMessage) + err := c.cc.Invoke(ctx, "/api.SecurityModule/ListSecurityProfiles", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *securityModuleClient) SaveSecurityProfile(ctx context.Context, in *SecurityProfileSaveParams, opts ...grpc.CallOption) (*SecurityProfileSaveMessage, error) { + out := new(SecurityProfileSaveMessage) + err := c.cc.Invoke(ctx, "/api.SecurityModule/SaveSecurityProfile", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // SecurityModuleServer is the server API for SecurityModule service. // All implementations must embed UnimplementedSecurityModuleServer // for forward compatibility @@ -227,6 +248,9 @@ type SecurityModuleServer interface { StopActivityDump(context.Context, *ActivityDumpStopParams) (*ActivityDumpStopMessage, error) TranscodingRequest(context.Context, *TranscodingRequestParams) (*TranscodingRequestMessage, error) GetActivityDumpStream(*ActivityDumpStreamParams, SecurityModule_GetActivityDumpStreamServer) error + // Security Profiles + ListSecurityProfiles(context.Context, *SecurityProfileListParams) (*SecurityProfileListMessage, error) + SaveSecurityProfile(context.Context, *SecurityProfileSaveParams) (*SecurityProfileSaveMessage, error) mustEmbedUnimplementedSecurityModuleServer() } @@ -273,6 +297,12 @@ func (UnimplementedSecurityModuleServer) TranscodingRequest(context.Context, *Tr func (UnimplementedSecurityModuleServer) GetActivityDumpStream(*ActivityDumpStreamParams, SecurityModule_GetActivityDumpStreamServer) error { return status.Errorf(codes.Unimplemented, "method GetActivityDumpStream not implemented") } +func (UnimplementedSecurityModuleServer) ListSecurityProfiles(context.Context, *SecurityProfileListParams) (*SecurityProfileListMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSecurityProfiles not implemented") +} +func (UnimplementedSecurityModuleServer) SaveSecurityProfile(context.Context, *SecurityProfileSaveParams) (*SecurityProfileSaveMessage, error) { + return nil, status.Errorf(codes.Unimplemented, 
"method SaveSecurityProfile not implemented") +} func (UnimplementedSecurityModuleServer) mustEmbedUnimplementedSecurityModuleServer() {} // UnsafeSecurityModuleServer may be embedded to opt out of forward compatibility for this service. @@ -526,6 +556,42 @@ func (x *securityModuleGetActivityDumpStreamServer) Send(m *ActivityDumpStreamMe return x.ServerStream.SendMsg(m) } +func _SecurityModule_ListSecurityProfiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SecurityProfileListParams) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SecurityModuleServer).ListSecurityProfiles(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/api.SecurityModule/ListSecurityProfiles", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SecurityModuleServer).ListSecurityProfiles(ctx, req.(*SecurityProfileListParams)) + } + return interceptor(ctx, in, info, handler) +} + +func _SecurityModule_SaveSecurityProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SecurityProfileSaveParams) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SecurityModuleServer).SaveSecurityProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/api.SecurityModule/SaveSecurityProfile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SecurityModuleServer).SaveSecurityProfile(ctx, req.(*SecurityProfileSaveParams)) + } + return interceptor(ctx, in, info, handler) +} + // SecurityModule_ServiceDesc is the grpc.ServiceDesc for SecurityModule service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -577,6 +643,14 @@ var SecurityModule_ServiceDesc = grpc.ServiceDesc{ MethodName: "TranscodingRequest", Handler: _SecurityModule_TranscodingRequest_Handler, }, + { + MethodName: "ListSecurityProfiles", + Handler: _SecurityModule_ListSecurityProfiles_Handler, + }, + { + MethodName: "SaveSecurityProfile", + Handler: _SecurityModule_SaveSecurityProfile_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/pkg/security/proto/api/api_vtproto.pb.go b/pkg/security/proto/api/api_vtproto.pb.go index edf1ea786cc24..5a7d4c2aff0c9 100644 --- a/pkg/security/proto/api/api_vtproto.pb.go +++ b/pkg/security/proto/api/api_vtproto.pb.go @@ -1076,10 +1076,12 @@ func (m *ActivityDumpParams) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i-- dAtA[i] = 0x20 } - if m.Timeout != 0 { - i = encodeVarint(dAtA, i, uint64(m.Timeout)) + if len(m.Timeout) > 0 { + i -= len(m.Timeout) + copy(dAtA[i:], m.Timeout) + i = encodeVarint(dAtA, i, uint64(len(m.Timeout))) i-- - dAtA[i] = 0x10 + dAtA[i] = 0x12 } if len(m.Comm) > 0 { i -= len(m.Comm) @@ -1091,7 +1093,7 @@ func (m *ActivityDumpParams) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ActivityDumpMetadataMessage) MarshalVT() (dAtA []byte, err error) { +func (m *MetadataMessage) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1104,12 +1106,12 @@ func (m *ActivityDumpMetadataMessage) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ActivityDumpMetadataMessage) MarshalToVT(dAtA []byte) (int, error) { +func (m *MetadataMessage) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ActivityDumpMetadataMessage) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MetadataMessage) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ 
-1752,542 +1754,655 @@ func (m *ActivityDumpStreamMessage) MarshalToSizedBufferVT(dAtA []byte) (int, er return len(dAtA) - i, nil } -func encodeVarint(dAtA []byte, offset int, v uint64) int { - offset -= sov(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GetEventParams) SizeVT() (n int) { +func (m *WorkloadSelectorMessage) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *SecurityEventMessage) SizeVT() (n int) { +func (m *WorkloadSelectorMessage) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkloadSelectorMessage) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.RuleID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Tags) > 0 { - for _, s := range m.Tags { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if len(m.Tag) > 0 { + i -= len(m.Tag) + copy(dAtA[i:], m.Tag) + i = encodeVarint(dAtA, i, uint64(len(m.Tag))) + i-- + dAtA[i] = 0x12 } - l = len(m.Service) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *DumpProcessCacheParams) SizeVT() (n int) { +func (m *LastAnomalyTimestampMessage) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + 
return nil, nil } - var l int - _ = l - if m.WithArgs { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SecurityDumpProcessCacheMessage) SizeVT() (n int) { +func (m *LastAnomalyTimestampMessage) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LastAnomalyTimestampMessage) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Filename) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if m.IsStableEventType { + i-- + if m.IsStableEventType { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Timestamp) > 0 { + i -= len(m.Timestamp) + copy(dAtA[i:], m.Timestamp) + i = encodeVarint(dAtA, i, uint64(len(m.Timestamp))) + i-- + dAtA[i] = 0x12 + } + if len(m.EventType) > 0 { + i -= len(m.EventType) + copy(dAtA[i:], m.EventType) + i = encodeVarint(dAtA, i, uint64(len(m.EventType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *DumpNetworkNamespaceParams) SizeVT() (n int) { +func (m *InstanceMessage) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.SnapshotInterfaces { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *DumpNetworkNamespaceMessage) SizeVT() (n int) { +func (m *InstanceMessage) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InstanceMessage) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Error) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.DumpFilename) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Tags) > 0 { + for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tags[iNdEx]) + copy(dAtA[i:], m.Tags[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tags[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - l = len(m.GraphFilename) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.ContainerID) > 0 { + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarint(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetConfigParams) SizeVT() (n int) { +func (m *ActivityTreeStatsMessage) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *SecurityConfigMessage) SizeVT() (n int) { +func (m *ActivityTreeStatsMessage) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ActivityTreeStatsMessage) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.RuntimeEnabled { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.FIMEnabled { - n += 2 + if m.ApproximateSize != 0 { + i = encodeVarint(dAtA, i, uint64(m.ApproximateSize)) + i-- + dAtA[i] = 0x28 } - if m.ActivityDumpEnabled { - n += 2 + if m.SocketNodesCount != 0 { + i = 
encodeVarint(dAtA, i, uint64(m.SocketNodesCount)) + i-- + dAtA[i] = 0x20 } - n += len(m.unknownFields) - return n -} - -func (m *RunSelfTestParams) SizeVT() (n int) { - if m == nil { - return 0 + if m.DNSNodesCount != 0 { + i = encodeVarint(dAtA, i, uint64(m.DNSNodesCount)) + i-- + dAtA[i] = 0x18 } - var l int - _ = l - n += len(m.unknownFields) - return n + if m.FileNodesCount != 0 { + i = encodeVarint(dAtA, i, uint64(m.FileNodesCount)) + i-- + dAtA[i] = 0x10 + } + if m.ProcessNodesCount != 0 { + i = encodeVarint(dAtA, i, uint64(m.ProcessNodesCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *ReloadPoliciesParams) SizeVT() (n int) { +func (m *SecurityProfileMessage) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ReloadPoliciesResultMessage) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *SecurityProfileMessage) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SecuritySelfTestResultMessage) SizeVT() (n int) { +func (m *SecurityProfileMessage) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Ok { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Error) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Stats != nil { + size, err := m.Stats.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 } - n += len(m.unknownFields) - return n -} - -func (m *GetStatusParams) SizeVT() (n int) { - if m == 
nil { - return 0 + if len(m.Tags) > 0 { + for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tags[iNdEx]) + copy(dAtA[i:], m.Tags[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tags[iNdEx]))) + i-- + dAtA[i] = 0x5a + } } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *ConstantValueAndSource) SizeVT() (n int) { - if m == nil { - return 0 + if m.Metadata != nil { + size, err := m.Metadata.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarint(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x4a } - if m.Value != 0 { - n += 1 + sov(uint64(m.Value)) + if len(m.Status) > 0 { + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarint(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x42 } - l = len(m.Source) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Instances) > 0 { + for iNdEx := len(m.Instances) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Instances[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } } - n += len(m.unknownFields) - return n -} - -func (m *SelfTestsStatus) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.LastAnomalies) > 0 { + for iNdEx := len(m.LastAnomalies) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LastAnomalies[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } } - var l int - _ = l - l = len(m.LastTimestamp) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.AnomalyDetectionEvents) > 0 { + for iNdEx := len(m.AnomalyDetectionEvents) - 1; iNdEx >= 0; iNdEx-- { + i -= 
len(m.AnomalyDetectionEvents[iNdEx]) + copy(dAtA[i:], m.AnomalyDetectionEvents[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.AnomalyDetectionEvents[iNdEx]))) + i-- + dAtA[i] = 0x2a + } } - if len(m.Success) > 0 { - for _, s := range m.Success { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.ProfileCookie != 0 { + i = encodeVarint(dAtA, i, uint64(m.ProfileCookie)) + i-- + dAtA[i] = 0x20 + } + if m.Selector != nil { + size, err := m.Selector.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - if len(m.Fails) > 0 { - for _, s := range m.Fails { - l = len(s) - n += 1 + l + sov(uint64(l)) + if len(m.LoadedInKernelTimestamp) > 0 { + i -= len(m.LoadedInKernelTimestamp) + copy(dAtA[i:], m.LoadedInKernelTimestamp) + i = encodeVarint(dAtA, i, uint64(len(m.LoadedInKernelTimestamp))) + i-- + dAtA[i] = 0x12 + } + if m.LoadedInKernel { + i-- + if m.LoadedInKernel { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x8 } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *Status) SizeVT() (n int) { +func (m *SecurityProfileListParams) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.Environment != nil { - l = m.Environment.SizeVT() - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.SelfTests != nil { - l = m.SelfTests.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ConstantFetcherStatus) SizeVT() (n int) { +func (m *SecurityProfileListParams) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SecurityProfileListParams) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := 
len(dAtA) + _ = i var l int _ = l - if len(m.Fetchers) > 0 { - for _, s := range m.Fetchers { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Values) > 0 { - for _, e := range m.Values { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.IncludeCache { + i-- + if m.IncludeCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x8 } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *EnvironmentStatus) SizeVT() (n int) { +func (m *SecurityProfileListMessage) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if len(m.Warnings) > 0 { - for _, s := range m.Warnings { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - if m.Constants != nil { - l = m.Constants.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.KernelLockdown) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.UseMmapableMaps { - n += 2 + return nil, nil } - if m.UseRingBuffer { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *DumpDiscardersParams) SizeVT() (n int) { +func (m *SecurityProfileListMessage) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SecurityProfileListMessage) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x12 + } + if len(m.Profiles) > 0 { + for iNdEx := len(m.Profiles) - 1; iNdEx >= 0; iNdEx-- { + 
size, err := m.Profiles[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *DumpDiscardersMessage) SizeVT() (n int) { +func (m *SecurityProfileSaveParams) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.DumpFilename) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *StorageRequestParams) SizeVT() (n int) { +func (m *SecurityProfileSaveParams) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SecurityProfileSaveParams) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.LocalStorageDirectory) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.LocalStorageFormats) > 0 { - for _, s := range m.LocalStorageFormats { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.Selector != nil { + size, err := m.Selector.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - if m.LocalStorageCompression { - n += 2 - } - if len(m.RemoteStorageFormats) > 0 { - for _, s := range m.RemoteStorageFormats { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + return len(dAtA) - i, nil +} + +func (m *SecurityProfileSaveMessage) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if m.RemoteStorageCompression { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ActivityDumpParams) SizeVT() (n int) { +func (m *SecurityProfileSaveMessage) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SecurityProfileSaveMessage) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Comm) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.Timeout != 0 { - n += 1 + sov(uint64(m.Timeout)) + if len(m.File) > 0 { + i -= len(m.File) + copy(dAtA[i:], m.File) + i = encodeVarint(dAtA, i, uint64(len(m.File))) + i-- + dAtA[i] = 0x12 } - if m.DifferentiateArgs { - n += 2 + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0xa } - if m.Storage != nil { - l = m.Storage.SizeVT() - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) + dAtA[offset] = uint8(v) + return base +} +func (m *GetEventParams) SizeVT() (n int) { + if m == nil { + return 0 } + var l int + _ = l n += len(m.unknownFields) return n } -func (m *ActivityDumpMetadataMessage) SizeVT() (n int) { +func (m *SecurityEventMessage) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.AgentVersion) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.AgentCommit) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.KernelVersion) + l = len(m.RuleID) if l > 0 { n += 1 + l + sov(uint64(l)) } - l = len(m.LinuxDistribution) + l = 
len(m.Data) if l > 0 { n += 1 + l + sov(uint64(l)) } - l = len(m.Arch) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Tags) > 0 { + for _, s := range m.Tags { + l = len(s) + n += 1 + l + sov(uint64(l)) + } } - l = len(m.Name) + l = len(m.Service) if l > 0 { n += 1 + l + sov(uint64(l)) } - l = len(m.ProtobufVersion) - if l > 0 { - n += 1 + l + sov(uint64(l)) + n += len(m.unknownFields) + return n +} + +func (m *DumpProcessCacheParams) SizeVT() (n int) { + if m == nil { + return 0 } - if m.DifferentiateArgs { + var l int + _ = l + if m.WithArgs { n += 2 } - l = len(m.Comm) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Start) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Timeout) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Size != 0 { - n += 1 + sov(uint64(m.Size)) - } - l = len(m.Serialization) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } n += len(m.unknownFields) return n } -func (m *StorageRequestMessage) SizeVT() (n int) { +func (m *SecurityDumpProcessCacheMessage) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) + l = len(m.Filename) if l > 0 { n += 1 + l + sov(uint64(l)) } - l = len(m.Format) - if l > 0 { - n += 1 + l + sov(uint64(l)) + n += len(m.unknownFields) + return n +} + +func (m *DumpNetworkNamespaceParams) SizeVT() (n int) { + if m == nil { + return 0 } - if m.Compression { + var l int + _ = l + if m.SnapshotInterfaces { n += 2 } - l = len(m.File) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } n += len(m.unknownFields) return n } -func (m *ActivityDumpMessage) SizeVT() (n int) { +func (m *DumpNetworkNamespaceMessage) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Host) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Source) + l = len(m.Error) if l > 0 { n += 1 + l + sov(uint64(l)) } - l = len(m.Service) + l = len(m.DumpFilename) if l > 0 { n += 1 + l + sov(uint64(l)) } - if 
len(m.Tags) > 0 { - for _, s := range m.Tags { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - if len(m.Storage) > 0 { - for _, e := range m.Storage { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - if m.Metadata != nil { - l = m.Metadata.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.Error) + l = len(m.GraphFilename) if l > 0 { n += 1 + l + sov(uint64(l)) } - if len(m.DNSNames) > 0 { - for _, s := range m.DNSNames { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } n += len(m.unknownFields) return n } -func (m *ActivityDumpListParams) SizeVT() (n int) { +func (m *GetConfigParams) SizeVT() (n int) { if m == nil { return 0 } @@ -2297,92 +2412,161 @@ func (m *ActivityDumpListParams) SizeVT() (n int) { return n } -func (m *ActivityDumpListMessage) SizeVT() (n int) { +func (m *SecurityConfigMessage) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Dumps) > 0 { - for _, e := range m.Dumps { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + if m.RuntimeEnabled { + n += 2 } - l = len(m.Error) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.FIMEnabled { + n += 2 + } + if m.ActivityDumpEnabled { + n += 2 } n += len(m.unknownFields) return n } -func (m *ActivityDumpStopParams) SizeVT() (n int) { +func (m *RunSelfTestParams) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Comm) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } n += len(m.unknownFields) return n } -func (m *ActivityDumpStopMessage) SizeVT() (n int) { +func (m *ReloadPoliciesParams) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Error) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } n += len(m.unknownFields) return n } -func (m *TranscodingRequestParams) SizeVT() (n int) { +func (m *ReloadPoliciesResultMessage) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - l = 
len(m.ActivityDumpFile) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Storage != nil { - l = m.Storage.SizeVT() - n += 1 + l + sov(uint64(l)) - } n += len(m.unknownFields) return n } -func (m *TranscodingRequestMessage) SizeVT() (n int) { +func (m *SecuritySelfTestResultMessage) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l + if m.Ok { + n += 2 + } l = len(m.Error) if l > 0 { n += 1 + l + sov(uint64(l)) } - if len(m.Storage) > 0 { - for _, e := range m.Storage { + n += len(m.unknownFields) + return n +} + +func (m *GetStatusParams) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ConstantValueAndSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Value != 0 { + n += 1 + sov(uint64(m.Value)) + } + l = len(m.Source) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SelfTestsStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LastTimestamp) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Success) > 0 { + for _, s := range m.Success { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Fails) > 0 { + for _, s := range m.Fails { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Status) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Environment != nil { + l = m.Environment.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.SelfTests != nil { + l = m.SelfTests.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ConstantFetcherStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fetchers) > 0 { + for _, s := range m.Fetchers { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Values) > 0 { + for _, e := 
range m.Values { l = e.SizeVT() n += 1 + l + sov(uint64(l)) } @@ -2391,27 +2575,114 @@ func (m *TranscodingRequestMessage) SizeVT() (n int) { return n } -func (m *ActivityDumpStreamParams) SizeVT() (n int) { +func (m *EnvironmentStatus) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l + if len(m.Warnings) > 0 { + for _, s := range m.Warnings { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.Constants != nil { + l = m.Constants.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.KernelLockdown) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.UseMmapableMaps { + n += 2 + } + if m.UseRingBuffer { + n += 2 + } n += len(m.unknownFields) return n } -func (m *ActivityDumpStreamMessage) SizeVT() (n int) { +func (m *DumpDiscardersParams) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - if m.Dump != nil { - l = m.Dump.SizeVT() + n += len(m.unknownFields) + return n +} + +func (m *DumpDiscardersMessage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DumpFilename) + if l > 0 { n += 1 + l + sov(uint64(l)) } - l = len(m.Data) + n += len(m.unknownFields) + return n +} + +func (m *StorageRequestParams) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LocalStorageDirectory) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.LocalStorageFormats) > 0 { + for _, s := range m.LocalStorageFormats { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.LocalStorageCompression { + n += 2 + } + if len(m.RemoteStorageFormats) > 0 { + for _, s := range m.RemoteStorageFormats { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.RemoteStorageCompression { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ActivityDumpParams) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Comm) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Timeout) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.DifferentiateArgs { 
+ n += 2 + } + if m.Storage != nil { + l = m.Storage.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.ContainerID) if l > 0 { n += 1 + l + sov(uint64(l)) } @@ -2419,13 +2690,1849 @@ func (m *ActivityDumpStreamMessage) SizeVT() (n int) { return n } -func sov(x uint64) (n int) { - return (bits.Len64(x|1) + 6) / 7 -} -func soz(x uint64) (n int) { - return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +func (m *MetadataMessage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AgentVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.AgentCommit) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.KernelVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.LinuxDistribution) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Arch) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ProtobufVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.DifferentiateArgs { + n += 2 + } + l = len(m.Comm) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Start) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Timeout) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Size != 0 { + n += 1 + sov(uint64(m.Size)) + } + l = len(m.Serialization) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StorageRequestMessage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Format) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Compression { + n += 2 + } + l = len(m.File) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ActivityDumpMessage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 
0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Source) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Service) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Tags) > 0 { + for _, s := range m.Tags { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Storage) > 0 { + for _, e := range m.Storage { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.Metadata != nil { + l = m.Metadata.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.DNSNames) > 0 { + for _, s := range m.DNSNames { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ActivityDumpListParams) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ActivityDumpListMessage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Dumps) > 0 { + for _, e := range m.Dumps { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ActivityDumpStopParams) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Comm) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ActivityDumpStopMessage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TranscodingRequestParams) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ActivityDumpFile) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Storage != nil { + l = m.Storage.SizeVT() + n += 1 + l + sov(uint64(l)) + } + 
n += len(m.unknownFields) + return n +} + +func (m *TranscodingRequestMessage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Storage) > 0 { + for _, e := range m.Storage { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ActivityDumpStreamParams) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ActivityDumpStreamMessage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Dump != nil { + l = m.Dump.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *WorkloadSelectorMessage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Tag) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LastAnomalyTimestampMessage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EventType) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Timestamp) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.IsStableEventType { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *InstanceMessage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Tags) > 0 { + for _, s := range m.Tags { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ActivityTreeStatsMessage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProcessNodesCount != 0 { + n += 1 + sov(uint64(m.ProcessNodesCount)) + } + if m.FileNodesCount != 0 { + n += 1 + sov(uint64(m.FileNodesCount)) + 
} + if m.DNSNodesCount != 0 { + n += 1 + sov(uint64(m.DNSNodesCount)) + } + if m.SocketNodesCount != 0 { + n += 1 + sov(uint64(m.SocketNodesCount)) + } + if m.ApproximateSize != 0 { + n += 1 + sov(uint64(m.ApproximateSize)) + } + n += len(m.unknownFields) + return n +} + +func (m *SecurityProfileMessage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadedInKernel { + n += 2 + } + l = len(m.LoadedInKernelTimestamp) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Selector != nil { + l = m.Selector.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.ProfileCookie != 0 { + n += 1 + sov(uint64(m.ProfileCookie)) + } + if len(m.AnomalyDetectionEvents) > 0 { + for _, s := range m.AnomalyDetectionEvents { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.LastAnomalies) > 0 { + for _, e := range m.LastAnomalies { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Instances) > 0 { + for _, e := range m.Instances { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.Status) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Metadata != nil { + l = m.Metadata.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.Tags) > 0 { + for _, s := range m.Tags { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.Stats != nil { + l = m.Stats.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SecurityProfileListParams) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IncludeCache { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SecurityProfileListMessage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Profiles) > 0 { + for _, e := range m.Profiles { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + 
+func (m *SecurityProfileSaveParams) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Selector != nil { + l = m.Selector.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SecurityProfileSaveMessage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.File) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GetEventParams) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetEventParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetEventParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityEventMessage) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityEventMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityEventMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuleID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuleID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } 
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DumpProcessCacheParams) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DumpProcessCacheParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DumpProcessCacheParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WithArgs", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.WithArgs = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityDumpProcessCacheMessage) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityDumpProcessCacheMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityDumpProcessCacheMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filename = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DumpNetworkNamespaceParams) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DumpNetworkNamespaceParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DumpNetworkNamespaceParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SnapshotInterfaces", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SnapshotInterfaces = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DumpNetworkNamespaceMessage) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DumpNetworkNamespaceMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DumpNetworkNamespaceMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DumpFilename", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DumpFilename = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GraphFilename", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GraphFilename = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetConfigParams) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetConfigParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetConfigParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityConfigMessage) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityConfigMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityConfigMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RuntimeEnabled = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FIMEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FIMEnabled = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActivityDumpEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ActivityDumpEnabled = bool(v != 0) + default: + iNdEx = 
preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunSelfTestParams) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunSelfTestParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunSelfTestParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReloadPoliciesParams) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReloadPoliciesParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReloadPoliciesParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReloadPoliciesResultMessage) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReloadPoliciesResultMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReloadPoliciesResultMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecuritySelfTestResultMessage) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecuritySelfTestResultMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecuritySelfTestResultMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ok", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ok = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetStatusParams) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetStatusParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetStatusParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConstantValueAndSource) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConstantValueAndSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConstantValueAndSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfTestsStatus) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfTestsStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfTestsStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTimestamp", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastTimestamp = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Success = append(m.Success, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fails", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fails = append(m.Fails, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m *GetEventParams) UnmarshalVT(dAtA []byte) error { +func (m *Status) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2448,12 +4555,84 @@ func (m *GetEventParams) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetEventParams: wiretype end group for non-group") + return fmt.Errorf("proto: Status: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetEventParams: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Environment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Environment == nil { + m.Environment = &EnvironmentStatus{} + } + if err := m.Environment.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfTests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SelfTests == nil { + m.SelfTests = &SelfTestsStatus{} + } + if err := m.SelfTests.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -2476,7 +4655,7 @@ func (m *GetEventParams) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SecurityEventMessage) UnmarshalVT(dAtA []byte) error { +func (m *ConstantFetcherStatus) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2499,15 +4678,15 @@ func (m *SecurityEventMessage) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SecurityEventMessage: wiretype end group for non-group") + return fmt.Errorf("proto: ConstantFetcherStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SecurityEventMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConstantFetcherStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuleID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Fetchers", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2535,13 +4714,13 @@ func (m *SecurityEventMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RuleID = string(dAtA[iNdEx:postIndex]) + m.Fetchers = append(m.Fetchers, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -2551,29 +4730,80 @@ func (m *SecurityEventMessage) UnmarshalVT(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} + m.Values = append(m.Values, &ConstantValueAndSource{}) + if err := m.Values[len(m.Values)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvironmentStatus) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvironmentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvironmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2601,11 +4831,47 @@ func (m 
*SecurityEventMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) + m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Constants", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Constants == nil { + m.Constants = &ConstantFetcherStatus{} + } + if err := m.Constants.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KernelLockdown", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2633,8 +4899,48 @@ func (m *SecurityEventMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Service = string(dAtA[iNdEx:postIndex]) + m.KernelLockdown = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UseMmapableMaps", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UseMmapableMaps = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UseRingBuffer", wireType) + } + var 
v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UseRingBuffer = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -2657,7 +4963,7 @@ func (m *SecurityEventMessage) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DumpProcessCacheParams) UnmarshalVT(dAtA []byte) error { +func (m *DumpDiscardersParams) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2680,32 +4986,12 @@ func (m *DumpProcessCacheParams) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DumpProcessCacheParams: wiretype end group for non-group") + return fmt.Errorf("proto: DumpDiscardersParams: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DumpProcessCacheParams: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DumpDiscardersParams: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WithArgs", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.WithArgs = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -2728,7 +5014,7 @@ func (m *DumpProcessCacheParams) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SecurityDumpProcessCacheMessage) UnmarshalVT(dAtA []byte) error { +func (m *DumpDiscardersMessage) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2751,15 +5037,15 @@ func (m *SecurityDumpProcessCacheMessage) UnmarshalVT(dAtA []byte) 
error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SecurityDumpProcessCacheMessage: wiretype end group for non-group") + return fmt.Errorf("proto: DumpDiscardersMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SecurityDumpProcessCacheMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DumpDiscardersMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DumpFilename", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2787,7 +5073,7 @@ func (m *SecurityDumpProcessCacheMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Filename = string(dAtA[iNdEx:postIndex]) + m.DumpFilename = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -2811,7 +5097,7 @@ func (m *SecurityDumpProcessCacheMessage) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DumpNetworkNamespaceParams) UnmarshalVT(dAtA []byte) error { +func (m *StorageRequestParams) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2834,15 +5120,79 @@ func (m *DumpNetworkNamespaceParams) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DumpNetworkNamespaceParams: wiretype end group for non-group") + return fmt.Errorf("proto: StorageRequestParams: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DumpNetworkNamespaceParams: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StorageRequestParams: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field LocalStorageDirectory", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LocalStorageDirectory = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalStorageFormats", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LocalStorageFormats = append(m.LocalStorageFormats, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotInterfaces", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalStorageCompression", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -2859,7 +5209,59 @@ func (m *DumpNetworkNamespaceParams) UnmarshalVT(dAtA []byte) error { break } } - m.SnapshotInterfaces = bool(v != 0) + m.LocalStorageCompression = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RemoteStorageFormats", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RemoteStorageFormats = append(m.RemoteStorageFormats, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemoteStorageCompression", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RemoteStorageCompression = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -2882,7 +5284,7 @@ func (m *DumpNetworkNamespaceParams) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DumpNetworkNamespaceMessage) UnmarshalVT(dAtA []byte) error { +func (m *ActivityDumpParams) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2905,15 +5307,15 @@ func (m *DumpNetworkNamespaceMessage) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DumpNetworkNamespaceMessage: wiretype end group for non-group") + return fmt.Errorf("proto: ActivityDumpParams: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DumpNetworkNamespaceMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ActivityDumpParams: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = 
%d for field Comm", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2941,11 +5343,11 @@ func (m *DumpNetworkNamespaceMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Error = string(dAtA[iNdEx:postIndex]) + m.Comm = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DumpFilename", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2973,13 +5375,33 @@ func (m *DumpNetworkNamespaceMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DumpFilename = string(dAtA[iNdEx:postIndex]) + m.Timeout = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DifferentiateArgs", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DifferentiateArgs = bool(v != 0) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GraphFilename", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -2989,75 +5411,60 @@ func (m *DumpNetworkNamespaceMessage) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l 
{ return io.ErrUnexpectedEOF } - m.GraphFilename = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { + if m.Storage == nil { + m.Storage = &StorageRequestParams{} + } + if err := m.Storage.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetConfigParams) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - if iNdEx >= l { - return io.ErrUnexpectedEOF + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if postIndex > l { + return io.ErrUnexpectedEOF } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetConfigParams: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetConfigParams: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = 
preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -3080,7 +5487,7 @@ func (m *GetConfigParams) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SecurityConfigMessage) UnmarshalVT(dAtA []byte) error { +func (m *MetadataMessage) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3103,17 +5510,17 @@ func (m *SecurityConfigMessage) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SecurityConfigMessage: wiretype end group for non-group") + return fmt.Errorf("proto: MetadataMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SecurityConfigMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MetadataMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeEnabled", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentVersion", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -3123,17 +5530,29 @@ func (m *SecurityConfigMessage) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.RuntimeEnabled = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FIMEnabled", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentCommit", wireType) } - var v int + var stringLen uint64 for shift 
:= uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -3143,17 +5562,29 @@ func (m *SecurityConfigMessage) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.FIMEnabled = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentCommit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActivityDumpEnabled", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KernelVersion", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -3163,219 +5594,155 @@ func (m *SecurityConfigMessage) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.ActivityDumpEnabled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RunSelfTestParams) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.KernelVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LinuxDistribution", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RunSelfTestParams: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RunSelfTestParams: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReloadPoliciesParams) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.LinuxDistribution = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Arch", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReloadPoliciesParams: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadPoliciesParams: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Arch = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReloadPoliciesResultMessage) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtobufVersion", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReloadPoliciesResultMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadPoliciesResultMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SecuritySelfTestResultMessage) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SecuritySelfTestResultMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SecuritySelfTestResultMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.ProtobufVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Ok", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DifferentiateArgs", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -3392,10 +5759,10 @@ func (m *SecuritySelfTestResultMessage) UnmarshalVT(dAtA []byte) error { break } } - m.Ok = bool(v != 0) - case 2: + m.DifferentiateArgs = bool(v != 0) + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Comm", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3423,113 +5790,75 @@ func (m *SecuritySelfTestResultMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Error = string(dAtA[iNdEx:postIndex]) + m.Comm = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + 
case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetStatusParams) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetStatusParams: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetStatusParams: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConstantValueAndSource) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConstantValueAndSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConstantValueAndSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Start = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3557,13 +5886,13 @@ func (m *ConstantValueAndSource) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ID = string(dAtA[iNdEx:postIndex]) + m.Timeout = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 13: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Size", wireType) } - m.Value = 0 + m.Size = 0 for 
shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -3573,14 +5902,14 @@ func (m *ConstantValueAndSource) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Value |= uint64(b&0x7F) << shift + m.Size |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 3: + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Serialization", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3608,7 +5937,7 @@ func (m *ConstantValueAndSource) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Source = string(dAtA[iNdEx:postIndex]) + m.Serialization = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3632,7 +5961,7 @@ func (m *ConstantValueAndSource) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SelfTestsStatus) UnmarshalVT(dAtA []byte) error { +func (m *StorageRequestMessage) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3655,15 +5984,15 @@ func (m *SelfTestsStatus) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SelfTestsStatus: wiretype end group for non-group") + return fmt.Errorf("proto: StorageRequestMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SelfTestsStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StorageRequestMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTimestamp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3691,11 +6020,11 @@ func (m *SelfTestsStatus) UnmarshalVT(dAtA []byte) error { if postIndex > l { 
return io.ErrUnexpectedEOF } - m.LastTimestamp = string(dAtA[iNdEx:postIndex]) + m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3723,11 +6052,31 @@ func (m *SelfTestsStatus) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Success = append(m.Success, string(dAtA[iNdEx:postIndex])) + m.Format = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Compression", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Compression = bool(v != 0) + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fails", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3755,7 +6104,7 @@ func (m *SelfTestsStatus) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Fails = append(m.Fails, string(dAtA[iNdEx:postIndex])) + m.File = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3779,7 +6128,7 @@ func (m *SelfTestsStatus) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Status) UnmarshalVT(dAtA []byte) error { +func (m *ActivityDumpMessage) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3802,17 +6151,17 @@ func (m *Status) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Status: wiretype end group for 
non-group") + return fmt.Errorf("proto: ActivityDumpMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ActivityDumpMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Environment", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -3822,118 +6171,27 @@ func (m *Status) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Environment == nil { - m.Environment = &EnvironmentStatus{} - } - if err := m.Environment.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelfTests", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SelfTests == nil { - m.SelfTests = &SelfTestsStatus{} - } - if err := m.SelfTests.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - 
iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConstantFetcherStatus) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConstantFetcherStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConstantFetcherStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fetchers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3961,13 +6219,13 @@ func (m *ConstantFetcherStatus) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Fetchers = append(m.Fetchers, string(dAtA[iNdEx:postIndex])) + m.Source = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -3977,80 +6235,27 @@ func (m *ConstantFetcherStatus) 
UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, &ConstantValueAndSource{}) - if err := m.Values[len(m.Values)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvironmentStatus) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvironmentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvironmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) } var stringLen uint64 for shift := uint(0); ; 
shift += 7 { @@ -4078,11 +6283,11 @@ func (m *EnvironmentStatus) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) + m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Constants", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4109,18 +6314,16 @@ func (m *EnvironmentStatus) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Constants == nil { - m.Constants = &ConstantFetcherStatus{} - } - if err := m.Constants.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Storage = append(m.Storage, &StorageRequestMessage{}) + if err := m.Storage[len(m.Storage)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KernelLockdown", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4130,29 +6333,33 @@ func (m *EnvironmentStatus) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.KernelLockdown = string(dAtA[iNdEx:postIndex]) + if m.Metadata == nil { + m.Metadata = &MetadataMessage{} + } + if err := m.Metadata.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = 
postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UseMmapableMaps", wireType) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4162,17 +6369,29 @@ func (m *EnvironmentStatus) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.UseMmapableMaps = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UseRingBuffer", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - var v int + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSNames", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4182,63 +6401,24 @@ func (m *EnvironmentStatus) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.UseRingBuffer = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DumpDiscardersParams) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DumpDiscardersParams: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DumpDiscardersParams: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + m.DNSNames = append(m.DNSNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -4261,7 +6441,7 @@ func (m *DumpDiscardersParams) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DumpDiscardersMessage) UnmarshalVT(dAtA []byte) error { +func (m *ActivityDumpListParams) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4284,44 +6464,12 @@ func (m *DumpDiscardersMessage) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DumpDiscardersMessage: wiretype end group for non-group") + return fmt.Errorf("proto: ActivityDumpListParams: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DumpDiscardersMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ActivityDumpListParams: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DumpFilename", 
wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DumpFilename = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -4344,7 +6492,7 @@ func (m *DumpDiscardersMessage) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StorageRequestParams) UnmarshalVT(dAtA []byte) error { +func (m *ActivityDumpListMessage) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4367,101 +6515,17 @@ func (m *StorageRequestParams) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StorageRequestParams: wiretype end group for non-group") + return fmt.Errorf("proto: ActivityDumpListMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StorageRequestParams: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ActivityDumpListMessage: illegal tag %d (wire type %d)", fieldNum, wire) } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalStorageDirectory", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - 
if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LocalStorageDirectory = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalStorageFormats", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LocalStorageFormats = append(m.LocalStorageFormats, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalStorageCompression", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.LocalStorageCompression = bool(v != 0) - case 4: + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RemoteStorageFormats", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Dumps", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4471,29 +6535,31 @@ func (m *StorageRequestParams) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + 
intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.RemoteStorageFormats = append(m.RemoteStorageFormats, string(dAtA[iNdEx:postIndex])) + m.Dumps = append(m.Dumps, &ActivityDumpMessage{}) + if err := m.Dumps[len(m.Dumps)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RemoteStorageCompression", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4503,12 +6569,24 @@ func (m *StorageRequestParams) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.RemoteStorageCompression = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -4531,7 +6609,7 @@ func (m *StorageRequestParams) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ActivityDumpParams) UnmarshalVT(dAtA []byte) error { +func (m *ActivityDumpStopParams) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4554,15 +6632,15 @@ func (m *ActivityDumpParams) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ActivityDumpParams: wiretype end group for non-group") + return fmt.Errorf("proto: ActivityDumpStopParams: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: 
ActivityDumpParams: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ActivityDumpStopParams: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Comm", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4590,52 +6668,13 @@ func (m *ActivityDumpParams) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Comm = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) - } - m.Timeout = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Timeout |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DifferentiateArgs", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DifferentiateArgs = bool(v != 0) - case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4645,31 +6684,27 @@ func (m *ActivityDumpParams) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if 
intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Storage == nil { - m.Storage = &StorageRequestParams{} - } - if err := m.Storage.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ContainerID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Comm", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4697,7 +6732,7 @@ func (m *ActivityDumpParams) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerID = string(dAtA[iNdEx:postIndex]) + m.Comm = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4721,7 +6756,7 @@ func (m *ActivityDumpParams) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ActivityDumpMetadataMessage) UnmarshalVT(dAtA []byte) error { +func (m *ActivityDumpStopMessage) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4744,15 +6779,15 @@ func (m *ActivityDumpMetadataMessage) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ActivityDumpMetadataMessage: wiretype end group for non-group") + return fmt.Errorf("proto: ActivityDumpStopMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ActivityDumpMetadataMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ActivityDumpStopMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", 
wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4780,11 +6815,62 @@ func (m *ActivityDumpMetadataMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AgentVersion = string(dAtA[iNdEx:postIndex]) + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TranscodingRequestParams) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TranscodingRequestParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TranscodingRequestParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentCommit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ActivityDumpFile", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4812,13 +6898,13 @@ func (m *ActivityDumpMetadataMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AgentCommit = string(dAtA[iNdEx:postIndex]) + m.ActivityDumpFile = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 2 
{ - return fmt.Errorf("proto: wrong wireType = %d for field KernelVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4828,59 +6914,82 @@ func (m *ActivityDumpMetadataMessage) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.KernelVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LinuxDistribution", wireType) + if m.Storage == nil { + m.Storage = &StorageRequestParams{} } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Storage.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TranscodingRequestMessage) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.LinuxDistribution = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TranscodingRequestMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TranscodingRequestMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Arch", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4908,13 +7017,13 @@ func (m *ActivityDumpMetadataMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Arch = string(dAtA[iNdEx:postIndex]) + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4924,81 +7033,133 @@ func (m *ActivityDumpMetadataMessage) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } 
- postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProtobufVersion", wireType) + m.Storage = append(m.Storage, &StorageRequestMessage{}) + if err := m.Storage[len(m.Storage)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActivityDumpStreamParams) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActivityDumpStreamParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActivityDumpStreamParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.ProtobufVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DifferentiateArgs", wireType) + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActivityDumpStreamMessage) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if iNdEx >= l { + return io.ErrUnexpectedEOF } - m.DifferentiateArgs = bool(v != 0) - case 9: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActivityDumpStreamMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActivityDumpStreamMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Comm", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Dump", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5008,29 +7169,33 @@ func (m *ActivityDumpMetadataMessage) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Comm = string(dAtA[iNdEx:postIndex]) + if m.Dump == nil { + m.Dump = &ActivityDumpMessage{} + } + if err := 
m.Dump.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 10: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5040,59 +7205,80 @@ func (m *ActivityDumpMetadataMessage) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkloadSelectorMessage) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Start = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkloadSelectorMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkloadSelectorMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5120,30 +7306,11 @@ func (m *ActivityDumpMetadataMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Timeout = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size", wireType) - } - m.Size = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Size |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 14: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Serialization", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ 
-5171,7 +7338,7 @@ func (m *ActivityDumpMetadataMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Serialization = string(dAtA[iNdEx:postIndex]) + m.Tag = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5195,7 +7362,7 @@ func (m *ActivityDumpMetadataMessage) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StorageRequestMessage) UnmarshalVT(dAtA []byte) error { +func (m *LastAnomalyTimestampMessage) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5218,15 +7385,15 @@ func (m *StorageRequestMessage) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StorageRequestMessage: wiretype end group for non-group") + return fmt.Errorf("proto: LastAnomalyTimestampMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StorageRequestMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LastAnomalyTimestampMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EventType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5254,11 +7421,11 @@ func (m *StorageRequestMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) + m.EventType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5286,11 +7453,11 @@ func (m *StorageRequestMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - m.Format = string(dAtA[iNdEx:postIndex]) + m.Timestamp = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Compression", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IsStableEventType", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -5307,10 +7474,61 @@ func (m *StorageRequestMessage) UnmarshalVT(dAtA []byte) error { break } } - m.Compression = bool(v != 0) - case 4: + m.IsStableEventType = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InstanceMessage) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InstanceMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InstanceMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5338,7 +7556,39 @@ func (m *StorageRequestMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - m.File = string(dAtA[iNdEx:postIndex]) + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -5362,7 +7612,7 @@ func (m *StorageRequestMessage) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ActivityDumpMessage) UnmarshalVT(dAtA []byte) error { +func (m *ActivityTreeStatsMessage) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5385,17 +7635,17 @@ func (m *ActivityDumpMessage) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ActivityDumpMessage: wiretype end group for non-group") + return fmt.Errorf("proto: ActivityTreeStatsMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ActivityDumpMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ActivityTreeStatsMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessNodesCount", wireType) } - var stringLen uint64 + m.ProcessNodesCount = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 
{ return ErrIntOverflow @@ -5405,29 +7655,54 @@ func (m *ActivityDumpMessage) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ProcessNodesCount |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FileNodesCount", wireType) } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + m.FileNodesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FileNodesCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - if postIndex > l { - return io.ErrUnexpectedEOF + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSNodesCount", wireType) } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + m.DNSNodesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DNSNodesCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - var stringLen uint64 + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SocketNodesCount", wireType) + } + m.SocketNodesCount = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5437,29 +7712,86 @@ func (m *ActivityDumpMessage) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.SocketNodesCount |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + case 5: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ApproximateSize", wireType) } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + m.ApproximateSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ApproximateSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityProfileMessage) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Source = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityProfileMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityProfileMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadedInKernel", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5469,27 +7801,15 @@ func (m *ActivityDumpMessage) UnmarshalVT(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Service = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + m.LoadedInKernel = bool(v != 0) + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadedInKernelTimestamp", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5517,11 +7837,11 @@ func (m *ActivityDumpMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) + m.LoadedInKernelTimestamp = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5548,16 +7868,18 @@ func (m *ActivityDumpMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Storage = append(m.Storage, &StorageRequestMessage{}) - if err := m.Storage[len(m.Storage)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Selector == nil { + m.Selector = &WorkloadSelectorMessage{} + } + if err := m.Selector.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProfileCookie", wireType) } - var msglen int + m.ProfileCookie = 0 for shift := uint(0); ; shift 
+= 7 { if shift >= 64 { return ErrIntOverflow @@ -5567,31 +7889,14 @@ func (m *ActivityDumpMessage) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.ProfileCookie |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &ActivityDumpMetadataMessage{} - } - if err := m.Metadata.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AnomalyDetectionEvents", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5619,13 +7924,13 @@ func (m *ActivityDumpMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Error = string(dAtA[iNdEx:postIndex]) + m.AnomalyDetectionEvents = append(m.AnomalyDetectionEvents, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 8: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DNSNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastAnomalies", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5635,129 +7940,29 @@ func (m *ActivityDumpMessage) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.DNSNames = 
append(m.DNSNames, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ActivityDumpListParams) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ActivityDumpListParams: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ActivityDumpListParams: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { + m.LastAnomalies = append(m.LastAnomalies, &LastAnomalyTimestampMessage{}) + if err := m.LastAnomalies[len(m.LastAnomalies)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ActivityDumpListMessage) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ActivityDumpListMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ActivityDumpListMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dumps", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Instances", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5784,14 +7989,14 @@ func (m *ActivityDumpListMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Dumps = append(m.Dumps, &ActivityDumpMessage{}) - if err := m.Dumps[len(m.Dumps)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Instances = append(m.Instances, &InstanceMessage{}) + if err := m.Instances[len(m.Instances)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5810,71 +8015,20 @@ func (m *ActivityDumpListMessage) UnmarshalVT(dAtA []byte) error { } intStringLen := int(stringLen) if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ActivityDumpStopParams) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ActivityDumpStopParams: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ActivityDumpStopParams: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5902,13 +8056,13 @@ func (m *ActivityDumpStopParams) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 
2: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5918,27 +8072,31 @@ func (m *ActivityDumpStopParams) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerID = string(dAtA[iNdEx:postIndex]) + if m.Metadata == nil { + m.Metadata = &MetadataMessage{} + } + if err := m.Metadata.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Comm", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5966,7 +8124,43 @@ func (m *ActivityDumpStopParams) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Comm = string(dAtA[iNdEx:postIndex]) + m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Stats == nil { + m.Stats = &ActivityTreeStatsMessage{} + } + if err := m.Stats.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -5990,7 +8184,7 @@ func (m *ActivityDumpStopParams) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ActivityDumpStopMessage) UnmarshalVT(dAtA []byte) error { +func (m *SecurityProfileListParams) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6013,17 +8207,17 @@ func (m *ActivityDumpStopMessage) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ActivityDumpStopMessage: wiretype end group for non-group") + return fmt.Errorf("proto: SecurityProfileListParams: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ActivityDumpStopMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SecurityProfileListParams: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeCache", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6033,24 +8227,12 @@ func (m *ActivityDumpStopMessage) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.IncludeCache = bool(v != 0) default: iNdEx = preIndex 
skippy, err := skip(dAtA[iNdEx:]) @@ -6073,7 +8255,7 @@ func (m *ActivityDumpStopMessage) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *TranscodingRequestParams) UnmarshalVT(dAtA []byte) error { +func (m *SecurityProfileListMessage) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6096,17 +8278,17 @@ func (m *TranscodingRequestParams) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TranscodingRequestParams: wiretype end group for non-group") + return fmt.Errorf("proto: SecurityProfileListMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TranscodingRequestParams: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SecurityProfileListMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ActivityDumpFile", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6116,29 +8298,31 @@ func (m *TranscodingRequestParams) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ActivityDumpFile = string(dAtA[iNdEx:postIndex]) + m.Profiles = append(m.Profiles, &SecurityProfileMessage{}) + if err := m.Profiles[len(m.Profiles)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Storage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6148,27 +8332,23 @@ func (m *TranscodingRequestParams) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Storage == nil { - m.Storage = &StorageRequestParams{} - } - if err := m.Storage.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -6192,7 +8372,7 @@ func (m *TranscodingRequestParams) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *TranscodingRequestMessage) UnmarshalVT(dAtA []byte) error { +func (m *SecurityProfileSaveParams) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6215,47 +8395,15 @@ func (m *TranscodingRequestMessage) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TranscodingRequestMessage: wiretype end group for non-group") + return fmt.Errorf("proto: SecurityProfileSaveParams: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TranscodingRequestMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SecurityProfileSaveParams: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6282,62 +8430,13 @@ func (m *TranscodingRequestMessage) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Storage = append(m.Storage, &StorageRequestMessage{}) - if err := m.Storage[len(m.Storage)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Selector == nil { + m.Selector = &WorkloadSelectorMessage{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { + if err := m.Selector.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ActivityDumpStreamParams) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ActivityDumpStreamParams: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ActivityDumpStreamParams: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -6360,7 +8459,7 @@ func (m *ActivityDumpStreamParams) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ActivityDumpStreamMessage) UnmarshalVT(dAtA []byte) error { +func (m *SecurityProfileSaveMessage) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6383,17 +8482,17 @@ func (m *ActivityDumpStreamMessage) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ActivityDumpStreamMessage: wiretype end group for non-group") + return fmt.Errorf("proto: SecurityProfileSaveMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ActivityDumpStreamMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SecurityProfileSaveMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dump", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } - var msglen int + var stringLen uint64 for shift 
:= uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6403,33 +8502,29 @@ func (m *ActivityDumpStreamMessage) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Dump == nil { - m.Dump = &ActivityDumpMessage{} - } - if err := m.Dump.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6439,25 +8534,23 @@ func (m *ActivityDumpStreamMessage) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } + m.File = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/pkg/security/proto/api/mocks/security_module_client.go b/pkg/security/proto/api/mocks/security_module_client.go index 548d164201be0..bc4070c84dcf4 100644 --- a/pkg/security/proto/api/mocks/security_module_client.go +++ b/pkg/security/proto/api/mocks/security_module_client.go @@ -314,6 +314,39 @@ func (_m *SecurityModuleClient) ListActivityDumps(ctx context.Context, in *api.A return r0, r1 } +// ListSecurityProfiles provides a mock function with given fields: ctx, in, opts +func (_m *SecurityModuleClient) ListSecurityProfiles(ctx context.Context, in *api.SecurityProfileListParams, opts ...grpc.CallOption) (*api.SecurityProfileListMessage, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *api.SecurityProfileListMessage + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *api.SecurityProfileListParams, ...grpc.CallOption) (*api.SecurityProfileListMessage, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *api.SecurityProfileListParams, ...grpc.CallOption) *api.SecurityProfileListMessage); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*api.SecurityProfileListMessage) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *api.SecurityProfileListParams, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // ReloadPolicies provides a mock function with given fields: ctx, in, opts func (_m *SecurityModuleClient) ReloadPolicies(ctx context.Context, in *api.ReloadPoliciesParams, opts ...grpc.CallOption) (*api.ReloadPoliciesResultMessage, error) { _va := make([]interface{}, len(opts)) @@ -380,6 +413,39 @@ func (_m *SecurityModuleClient) RunSelfTest(ctx context.Context, in *api.RunSelf return r0, r1 } +// SaveSecurityProfile provides a mock function with given fields: ctx, in, opts +func (_m *SecurityModuleClient) SaveSecurityProfile(ctx context.Context, in *api.SecurityProfileSaveParams, opts ...grpc.CallOption) (*api.SecurityProfileSaveMessage, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *api.SecurityProfileSaveMessage + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *api.SecurityProfileSaveParams, ...grpc.CallOption) (*api.SecurityProfileSaveMessage, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *api.SecurityProfileSaveParams, ...grpc.CallOption) *api.SecurityProfileSaveMessage); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*api.SecurityProfileSaveMessage) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *api.SecurityProfileSaveParams, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // StopActivityDump provides a mock function with given fields: ctx, in, opts func (_m *SecurityModuleClient) StopActivityDump(ctx context.Context, in *api.ActivityDumpStopParams, opts ...grpc.CallOption) (*api.ActivityDumpStopMessage, error) { _va := make([]interface{}, len(opts)) diff --git a/pkg/security/proto/api/mocks/security_module_server.go b/pkg/security/proto/api/mocks/security_module_server.go index 0c8e7d4dff4cb..f2f681df7c74f 100644 --- a/pkg/security/proto/api/mocks/security_module_server.go +++ b/pkg/security/proto/api/mocks/security_module_server.go @@ -225,6 +225,32 @@ func (_m *SecurityModuleServer) ListActivityDumps(_a0 context.Context, _a1 *api. return r0, r1 } +// ListSecurityProfiles provides a mock function with given fields: _a0, _a1 +func (_m *SecurityModuleServer) ListSecurityProfiles(_a0 context.Context, _a1 *api.SecurityProfileListParams) (*api.SecurityProfileListMessage, error) { + ret := _m.Called(_a0, _a1) + + var r0 *api.SecurityProfileListMessage + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *api.SecurityProfileListParams) (*api.SecurityProfileListMessage, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *api.SecurityProfileListParams) *api.SecurityProfileListMessage); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*api.SecurityProfileListMessage) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *api.SecurityProfileListParams) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // ReloadPolicies provides a mock function with given fields: _a0, _a1 func (_m *SecurityModuleServer) ReloadPolicies(_a0 context.Context, _a1 *api.ReloadPoliciesParams) (*api.ReloadPoliciesResultMessage, error) { ret := _m.Called(_a0, _a1) @@ -277,6 +303,32 @@ func (_m *SecurityModuleServer) RunSelfTest(_a0 context.Context, _a1 *api.RunSel return r0, r1 } +// 
SaveSecurityProfile provides a mock function with given fields: _a0, _a1 +func (_m *SecurityModuleServer) SaveSecurityProfile(_a0 context.Context, _a1 *api.SecurityProfileSaveParams) (*api.SecurityProfileSaveMessage, error) { + ret := _m.Called(_a0, _a1) + + var r0 *api.SecurityProfileSaveMessage + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *api.SecurityProfileSaveParams) (*api.SecurityProfileSaveMessage, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *api.SecurityProfileSaveParams) *api.SecurityProfileSaveMessage); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*api.SecurityProfileSaveMessage) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *api.SecurityProfileSaveParams) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // StopActivityDump provides a mock function with given fields: _a0, _a1 func (_m *SecurityModuleServer) StopActivityDump(_a0 context.Context, _a1 *api.ActivityDumpStopParams) (*api.ActivityDumpStopMessage, error) { ret := _m.Called(_a0, _a1) diff --git a/pkg/security/reporter/reporter.go b/pkg/security/reporter/reporter.go new file mode 100644 index 0000000000000..baf97b1a9f613 --- /dev/null +++ b/pkg/security/reporter/reporter.go @@ -0,0 +1,66 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package reporter + +import ( + "time" + + pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/logs/auditor" + "github.com/DataDog/datadog-agent/pkg/logs/client" + logsconfig "github.com/DataDog/datadog-agent/pkg/logs/config" + "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" + "github.com/DataDog/datadog-agent/pkg/logs/message" + "github.com/DataDog/datadog-agent/pkg/logs/pipeline" + "github.com/DataDog/datadog-agent/pkg/logs/sources" + seccommon "github.com/DataDog/datadog-agent/pkg/security/common" + "github.com/DataDog/datadog-agent/pkg/status/health" + "github.com/DataDog/datadog-agent/pkg/util/startstop" +) + +type RuntimeReporter struct { + logSource *sources.LogSource + logChan chan *message.Message +} + +func (r *RuntimeReporter) ReportRaw(content []byte, service string, tags ...string) { + origin := message.NewOrigin(r.logSource) + origin.SetTags(tags) + origin.SetService(service) + msg := message.NewMessage(content, origin, message.StatusInfo, time.Now().UnixNano()) + r.logChan <- msg +} + +func NewCWSReporter(runPath string, stopper startstop.Stopper, endpoints *logsconfig.Endpoints, context *client.DestinationsContext) (seccommon.RawReporter, error) { + return newReporter(runPath, stopper, "runtime-security-agent", "runtime-security", endpoints, context) +} + +func newReporter(runPath string, stopper startstop.Stopper, sourceName, sourceType string, endpoints *logsconfig.Endpoints, context *client.DestinationsContext) (seccommon.RawReporter, error) { + health := health.RegisterLiveness("runtime-security") + + // setup the auditor + auditor := auditor.New(runPath, "runtime-security-registry.json", pkgconfig.DefaultAuditorTTL, health) + auditor.Start() + stopper.Add(auditor) + + // setup the pipeline provider that provides pairs of processor and sender + pipelineProvider := pipeline.NewProvider(logsconfig.NumberOfPipelines, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, context) + 
pipelineProvider.Start() + stopper.Add(pipelineProvider) + + logSource := sources.NewLogSource( + sourceName, + &logsconfig.LogsConfig{ + Type: sourceType, + Source: sourceName, + }, + ) + logChan := pipelineProvider.NextPipelineChan() + return &RuntimeReporter{ + logSource: logSource, + logChan: logChan, + }, nil +} diff --git a/pkg/security/resolvers/cgroup/model/model.go b/pkg/security/resolvers/cgroup/model/model.go index 627843e8b0093..b15027cb0260a 100644 --- a/pkg/security/resolvers/cgroup/model/model.go +++ b/pkg/security/resolvers/cgroup/model/model.go @@ -38,9 +38,9 @@ func NewWorkloadSelector(image string, tag string) (WorkloadSelector, error) { }, nil } -// IsEmpty returns true if the selector is set -func (ws *WorkloadSelector) IsEmpty() bool { - return len(ws.Tag) != 0 && len(ws.Image) != 0 +// IsReady returns true if the selector is ready +func (ws *WorkloadSelector) IsReady() bool { + return len(ws.Image) != 0 } // Match returns true if the input selector matches the current selector @@ -122,5 +122,5 @@ func (cgce *CacheEntry) SetTags(tags []string) { // NeedsTagsResolution returns true if this workload is missing its tags func (cgce *CacheEntry) NeedsTagsResolution() bool { - return len(cgce.ID) != 0 && !cgce.WorkloadSelector.IsEmpty() + return len(cgce.ID) != 0 && !cgce.WorkloadSelector.IsReady() } diff --git a/pkg/security/resolvers/mount/resolver.go b/pkg/security/resolvers/mount/resolver.go index 9b515b7a41581..75bdc61bd4c79 100644 --- a/pkg/security/resolvers/mount/resolver.go +++ b/pkg/security/resolvers/mount/resolver.go @@ -26,6 +26,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/metrics" "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup" "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/utils" "github.com/DataDog/datadog-agent/pkg/util/kernel" ) @@ -90,7 +91,7 @@ type Resolver struct { deleteQueue []deleteRequest minMountID uint32 redemption 
*simplelru.LRU[uint32, *model.Mount] - fallbackLimiter *simplelru.LRU[uint32, time.Time] + fallbackLimiter *utils.Limiter[uint32] // stats cacheHitsStats *atomic.Int64 @@ -230,7 +231,7 @@ func (mr *Resolver) ResolveFilesystem(mountID, pid uint32, containerID string) ( } // Insert a new mount point in the cache -func (mr *Resolver) Insert(e model.Mount, pid uint32, containerID string) error { +func (mr *Resolver) Insert(e model.Mount) error { if e.MountID == 0 { return ErrMountUndefined } @@ -389,23 +390,11 @@ func (mr *Resolver) ResolveMountPath(mountID, pid uint32, containerID string) (s return mr.resolveMountPath(mountID, containerID, pid) } -func (mr *Resolver) isSyncCacheAllowed(mountID uint32) bool { - now := time.Now() - if ts, ok := mr.fallbackLimiter.Get(mountID); ok { - if now.After(ts) { - mr.fallbackLimiter.Remove(mountID) - } else { - return false - } - } - return true -} - func (mr *Resolver) syncCacheMiss(mountID uint32) { mr.procMissStats.Inc() // add to fallback limiter to avoid storm of file access - mr.fallbackLimiter.Add(mountID, time.Now().Add(fallbackLimiterPeriod)) + mr.fallbackLimiter.Count(mountID) } func (mr *Resolver) resolveMountPath(mountID uint32, containerID string, pids ...uint32) (string, error) { @@ -436,7 +425,7 @@ func (mr *Resolver) resolveMountPath(mountID uint32, containerID string, pids .. 
return "", &ErrMountNotFound{MountID: mountID} } - if !mr.isSyncCacheAllowed(mountID) { + if !mr.fallbackLimiter.IsAllowed(mountID) { return "", &ErrMountNotFound{MountID: mountID} } @@ -491,6 +480,10 @@ func (mr *Resolver) resolveMount(mountID uint32, containerID string, pids ...uin return nil, &ErrMountNotFound{MountID: mountID} } + if !mr.fallbackLimiter.IsAllowed(mountID) { + return nil, &ErrMountNotFound{MountID: mountID} + } + if err := mr.syncCache(pids...); err != nil { mr.syncCacheMiss(mountID) return nil, err @@ -627,11 +620,11 @@ func NewResolver(statsdClient statsd.ClientInterface, cgroupsResolver *cgroup.Re } mr.redemption = redemption - fallbackLimiter, err := simplelru.NewLRU[uint32, time.Time](64, nil) + limiter, err := utils.NewLimiter[uint32](64, fallbackLimiterPeriod) if err != nil { return nil, err } - mr.fallbackLimiter = fallbackLimiter + mr.fallbackLimiter = limiter return mr, nil } diff --git a/pkg/security/resolvers/process/resolver.go b/pkg/security/resolvers/process/resolver.go index 4aaa4b1692a02..78c265b6cd20a 100644 --- a/pkg/security/resolvers/process/resolver.go +++ b/pkg/security/resolvers/process/resolver.go @@ -48,8 +48,9 @@ const ( ) const ( - procResolveMaxDepth = 16 - maxParallelArgsEnvs = 512 // == number of parallel starting processes + procResolveMaxDepth = 16 + maxParallelArgsEnvs = 512 // == number of parallel starting processes + procFallbackLimiterPeriod = 30 * time.Second // proc fallback period by pid ) // ResolverOpts options of resolver @@ -100,6 +101,9 @@ type Resolver struct { processCacheEntryPool *ProcessCacheEntryPool + // limiters + procFallbackLimiter *utils.Limiter[uint32] + exitedQueue []uint32 } @@ -362,7 +366,8 @@ func (p *Resolver) enrichEventFromProc(entry *model.ProcessCacheEntry, proc *pro // resolve container path with the MountResolver entry.FileEvent.Filesystem, err = p.mountResolver.ResolveFilesystem(entry.Process.FileEvent.MountID, entry.Process.Pid, string(containerID)) if err != nil { - return 
fmt.Errorf("snapshot failed for %d: couldn't get the filesystem: %w", proc.Pid, err) + entry.FileEvent.Filesystem = model.UnknownFS + seclog.Debugf("snapshot failed for %d: couldn't get the filesystem: %s", proc.Pid, err) } entry.ExecTime = time.Unix(0, filledProc.CreateTime*int64(time.Millisecond)) @@ -417,7 +422,7 @@ func (p *Resolver) enrichEventFromProc(entry *model.ProcessCacheEntry, proc *pro // // print "Hello from Perl\n"; // - //EOF + // EOF if values := entry.ArgsEntry.Values; len(values) > 1 { firstArg := values[0] lastArg := values[len(values)-1] @@ -511,7 +516,7 @@ func (p *Resolver) insertForkEntry(entry *model.ProcessCacheEntry, origin uint64 parent := p.entryCache[entry.PPid] if parent == nil && entry.PPid >= 1 { - parent = p.resolve(entry.PPid, entry.PPid, entry.Inode) + parent = p.resolve(entry.PPid, entry.PPid, entry.ExecInode, true) } if parent != nil { @@ -555,14 +560,14 @@ func (p *Resolver) DeleteEntry(pid uint32, exitTime time.Time) { } // Resolve returns the cache entry for the given pid -func (p *Resolver) Resolve(pid, tid uint32, inode uint64) *model.ProcessCacheEntry { +func (p *Resolver) Resolve(pid, tid uint32, inode uint64, useProcFS bool) *model.ProcessCacheEntry { p.Lock() defer p.Unlock() - return p.resolve(pid, tid, inode) + return p.resolve(pid, tid, inode, useProcFS) } -func (p *Resolver) resolve(pid, tid uint32, inode uint64) *model.ProcessCacheEntry { +func (p *Resolver) resolve(pid, tid uint32, inode uint64, useProcFS bool) *model.ProcessCacheEntry { if entry := p.resolveFromCache(pid, tid, inode); entry != nil { p.hitsStats[metrics.CacheTag].Inc() return entry @@ -573,15 +578,24 @@ func (p *Resolver) resolve(pid, tid uint32, inode uint64) *model.ProcessCacheEnt } // fallback to the kernel maps directly, the perf event may be delayed / may have been lost - if entry := p.resolveFromKernelMaps(pid, tid); entry != nil { + if entry := p.resolveFromKernelMaps(pid, tid, inode); entry != nil { 
p.hitsStats[metrics.KernelMapsTag].Inc() return entry } - // fallback to /proc, the in-kernel LRU may have deleted the entry - if entry := p.resolveFromProcfs(pid, procResolveMaxDepth); entry != nil { - p.hitsStats[metrics.ProcFSTag].Inc() - return entry + if !useProcFS { + p.missStats.Inc() + return nil + } + + if p.procFallbackLimiter.IsAllowed(pid) { + p.procFallbackLimiter.Count(pid) + + // fallback to /proc, the in-kernel LRU may have deleted the entry + if entry := p.resolveFromProcfs(pid, procResolveMaxDepth); entry != nil { + p.hitsStats[metrics.ProcFSTag].Inc() + return entry + } } p.missStats.Inc() @@ -618,15 +632,15 @@ func (p *Resolver) SetProcessPath(fileEvent *model.FileEvent, pidCtx *model.PIDC return fileEvent.PathnameStr, nil } -func isBusybox(pathname string) bool { +func IsBusybox(pathname string) bool { return pathname == "/bin/busybox" || pathname == "/usr/bin/busybox" } // SetProcessSymlink resolves process file symlink path func (p *Resolver) SetProcessSymlink(entry *model.ProcessCacheEntry) { // TODO: busybox workaround only for now - if isBusybox(entry.FileEvent.PathnameStr) { - arg0, _ := p.GetProcessArgv0(&entry.Process) + if IsBusybox(entry.FileEvent.PathnameStr) { + arg0, _ := GetProcessArgv0(&entry.Process) base := path.Base(arg0) entry.SymlinkPathnameStr[0] = "/bin/" + base @@ -711,13 +725,13 @@ func (p *Resolver) ResolveNewProcessCacheEntry(entry *model.ProcessCacheEntry, c } // ResolveFromKernelMaps resolves the entry from the kernel maps -func (p *Resolver) ResolveFromKernelMaps(pid, tid uint32) *model.ProcessCacheEntry { +func (p *Resolver) ResolveFromKernelMaps(pid, tid uint32, inode uint64) *model.ProcessCacheEntry { p.Lock() defer p.Unlock() - return p.resolveFromKernelMaps(pid, tid) + return p.resolveFromKernelMaps(pid, tid, inode) } -func (p *Resolver) resolveFromKernelMaps(pid, tid uint32) *model.ProcessCacheEntry { +func (p *Resolver) resolveFromKernelMaps(pid, tid uint32, inode uint64) *model.ProcessCacheEntry { pidb := 
make([]byte, 4) model.ByteOrder.PutUint32(pidb, pid) @@ -732,7 +746,7 @@ func (p *Resolver) resolveFromKernelMaps(pid, tid uint32) *model.ProcessCacheEnt return nil } - entry := p.NewProcessCacheEntry(model.PIDContext{Pid: pid, Tid: tid}) + entry := p.NewProcessCacheEntry(model.PIDContext{Pid: pid, Tid: tid, ExecInode: inode}) var ctrCtx model.ContainerContext read, err := ctrCtx.UnmarshalBinary(procCache) @@ -744,6 +758,11 @@ func (p *Resolver) resolveFromKernelMaps(pid, tid uint32) *model.ProcessCacheEnt return nil } + // check that the cache entry correspond to the event + if entry.FileEvent.Inode != entry.ExecInode { + return nil + } + if _, err := entry.UnmarshalPidCacheBinary(pidCache); err != nil { return nil } @@ -787,18 +806,26 @@ func (p *Resolver) ResolveFromProcfs(pid uint32) *model.ProcessCacheEntry { } func (p *Resolver) resolveFromProcfs(pid uint32, maxDepth int) *model.ProcessCacheEntry { - if maxDepth < 1 || pid == 0 { + if maxDepth < 1 { + seclog.Tracef("max depth reached during procfs resolution: %d", pid) + return nil + } + + if pid == 0 { + seclog.Tracef("no pid: %d", pid) return nil } var ppid uint32 proc, err := process.NewProcess(int32(pid)) if err != nil { + seclog.Tracef("unable to find pid: %d", pid) return nil } - filledProc := utils.GetFilledProcess(proc) - if filledProc == nil { + filledProc, err := utils.GetFilledProcess(proc) + if err != nil { + seclog.Tracef("unable to get a filled process for pid %d: %d", pid, err) return nil } @@ -849,29 +876,30 @@ func (p *Resolver) SetProcessArgs(pce *model.ProcessCacheEntry) { // GetProcessArgv returns the args of the event as an array func GetProcessArgv(pr *model.Process) ([]string, bool) { if pr.ArgsEntry == nil { - return nil, false + return pr.Argv, pr.ArgsTruncated } argv := pr.ArgsEntry.Values if len(argv) > 0 { argv = argv[1:] } - - return argv, pr.ArgsTruncated || pr.ArgsEntry.Truncated + pr.Argv = argv + pr.ArgsTruncated = pr.ArgsTruncated || pr.ArgsEntry.Truncated + return pr.Argv, 
pr.ArgsTruncated } -// GetProcessArgv0 returns the first arg of the event -func (p *Resolver) GetProcessArgv0(pr *model.Process) (string, bool) { +// GetProcessArgv0 returns the first arg of the event and whether the process arguments are truncated +func GetProcessArgv0(pr *model.Process) (string, bool) { if pr.ArgsEntry == nil { - return "", false + return pr.Argv0, pr.ArgsTruncated } argv := pr.ArgsEntry.Values if len(argv) > 0 { - return argv[0], pr.ArgsTruncated || pr.ArgsEntry.Truncated + pr.Argv0 = argv[0] } - - return "", pr.ArgsTruncated || pr.ArgsEntry.Truncated + pr.ArgsTruncated = pr.ArgsTruncated || pr.ArgsEntry.Truncated + return pr.Argv0, pr.ArgsTruncated } // GetProcessScrubbedArgv returns the scrubbed args of the event as an array @@ -915,21 +943,24 @@ func (p *Resolver) SetProcessEnvs(pce *model.ProcessCacheEntry) { // GetProcessEnvs returns the envs of the event func (p *Resolver) GetProcessEnvs(pr *model.Process) ([]string, bool) { if pr.EnvsEntry == nil { - return nil, false + return pr.Envs, pr.EnvsTruncated } keys, truncated := pr.EnvsEntry.FilterEnvs(p.opts.envsWithValue) - - return keys, pr.EnvsTruncated || truncated + pr.Envs = keys + pr.EnvsTruncated = pr.EnvsTruncated || truncated + return pr.Envs, pr.EnvsTruncated } // GetProcessEnvp returns the envs of the event with their values func (p *Resolver) GetProcessEnvp(pr *model.Process) ([]string, bool) { if pr.EnvsEntry == nil { - return nil, false + return pr.Envp, pr.EnvsTruncated } - return pr.EnvsEntry.Values, pr.EnvsTruncated || pr.EnvsEntry.Truncated + pr.Envp = pr.EnvsEntry.Values + pr.EnvsTruncated = pr.EnvsTruncated || pr.EnvsEntry.Truncated + return pr.Envp, pr.EnvsTruncated } // SetProcessTTY resolves TTY and cache the result @@ -1070,8 +1101,9 @@ func (p *Resolver) SyncCache(proc *process.Process) bool { p.Lock() defer p.Unlock() - filledProc := utils.GetFilledProcess(proc) - if filledProc == nil { + filledProc, err := utils.GetFilledProcess(proc) + if err != nil { + 
seclog.Tracef("unable to get a filled process for %d: %v", proc.Pid, err) return false } @@ -1280,6 +1312,12 @@ func NewResolver(manager *manager.Manager, config *config.Config, statsdClient s } p.processCacheEntryPool = NewProcessCacheEntryPool(p) + limiter, err := utils.NewLimiter[uint32](128, procFallbackLimiterPeriod) + if err != nil { + return nil, err + } + p.procFallbackLimiter = limiter + return p, nil } diff --git a/pkg/security/secl/compiler/ast/secl.go b/pkg/security/secl/compiler/ast/secl.go index 7152615d6b11a..55534e9810559 100644 --- a/pkg/security/secl/compiler/ast/secl.go +++ b/pkg/security/secl/compiler/ast/secl.go @@ -141,9 +141,9 @@ type Expression struct { type Comparison struct { Pos lexer.Position - BitOperation *BitOperation `parser:"@@"` - ScalarComparison *ScalarComparison `parser:"[ @@"` - ArrayComparison *ArrayComparison `parser:"| @@ ]"` + ArithmeticOperation *ArithmeticOperation `parser:"@@"` + ScalarComparison *ScalarComparison `parser:"[ @@"` + ArrayComparison *ArrayComparison `parser:"| @@ ]"` } // ScalarComparison describes a scalar comparison : the operator with the right operand @@ -171,6 +171,18 @@ type BitOperation struct { Next *BitOperation `parser:"@@ ]"` } +type ArithmeticOperation struct { + Pos lexer.Position + + First *BitOperation `parser:"@@"` + Rest []*ArithmeticElement `parser:"[ @@ { @@ } ]"` +} + +type ArithmeticElement struct { + Op string `parser:"@( \"+\" | \"-\" )"` + Operand *BitOperation `parser:"@@"` +} + // Unary describes an unary operation like logical not, binary not, minus type Unary struct { Pos lexer.Position diff --git a/pkg/security/secl/compiler/eval/eval.go b/pkg/security/secl/compiler/eval/eval.go index 20efbd0168e49..a36ab6067f49b 100644 --- a/pkg/security/secl/compiler/eval/eval.go +++ b/pkg/security/secl/compiler/eval/eval.go @@ -473,8 +473,58 @@ func nodeToEvaluator(obj interface{}, opts *Opts, state *State) (interface{}, le } return unary, obj.Pos, nil + case *ast.ArithmeticOperation: + // 
Process the first operand + first, pos, err := nodeToEvaluator(obj.First, opts, state) + if err != nil { + return nil, pos, err + } + + // If it's just one element (is a bitoperation: maybe a string, an int ....) + if len(obj.Rest) == 0 { + return first, obj.Pos, nil + } + + // Else it's an operation, so it must be an int + currInt, ok := first.(*IntEvaluator) + if !ok { + return nil, obj.Pos, NewTypeError(obj.Pos, reflect.Int) + } + + // Process the remaining operations and operands + for _, arithElem := range obj.Rest { + // Handle the operand + operand, pos, err := nodeToEvaluator(arithElem.Operand, opts, state) + if err != nil { + return nil, pos, err + } + operandInt, ok := operand.(*IntEvaluator) + if !ok { + return nil, pos, NewTypeError(pos, reflect.Int) + } + + // Perform the operation on the current and next operands + switch arithElem.Op { + case "+": + currInt, err = IntPlus(currInt, operandInt, state) + if err != nil { + return nil, pos, err + } + + case "-": + currInt, err = IntMinus(currInt, operandInt, state) + if err != nil { + return nil, pos, err + } + } + } + + // Return the final result after processing all operations and operands + currInt.isFromArithmeticOperation = true + return currInt, obj.Pos, nil + case *ast.Comparison: - unary, pos, err = nodeToEvaluator(obj.BitOperation, opts, state) + unary, pos, err = nodeToEvaluator(obj.ArithmeticOperation, opts, state) if err != nil { return nil, pos, err } @@ -880,31 +930,73 @@ func nodeToEvaluator(obj interface{}, opts *Opts, state *State) (interface{}, le switch nextInt := next.(type) { case *IntEvaluator: if nextInt.isDuration { - switch *obj.ScalarComparison.Op { - case "<": - boolEvaluator, err = DurationLesserThan(unary, nextInt, state) - if err != nil { - return nil, obj.Pos, err - } - return boolEvaluator, obj.Pos, nil - case "<=": - boolEvaluator, err = DurationLesserOrEqualThan(unary, nextInt, state) - if err != nil { - return nil, obj.Pos, err - } - return boolEvaluator, obj.Pos, nil - 
case ">": - boolEvaluator, err = DurationGreaterThan(unary, nextInt, state) - if err != nil { - return nil, obj.Pos, err + if unary.isFromArithmeticOperation { + switch *obj.ScalarComparison.Op { + case "<": + boolEvaluator, err = DurationLesserThanArithmeticOperation(unary, nextInt, state) + if err != nil { + return nil, obj.Pos, err + } + return boolEvaluator, obj.Pos, nil + case "<=": + boolEvaluator, err = DurationLesserOrEqualThanArithmeticOperation(unary, nextInt, state) + if err != nil { + return nil, obj.Pos, err + } + return boolEvaluator, obj.Pos, nil + case ">": + boolEvaluator, err = DurationGreaterThanArithmeticOperation(unary, nextInt, state) + if err != nil { + return nil, obj.Pos, err + } + return boolEvaluator, obj.Pos, nil + case ">=": + boolEvaluator, err = DurationGreaterOrEqualThanArithmeticOperation(unary, nextInt, state) + if err != nil { + return nil, obj.Pos, err + } + return boolEvaluator, obj.Pos, nil + case "==": + boolEvaluator, err = DurationEqualArithmeticOperation(unary, nextInt, state) + if err != nil { + return nil, obj.Pos, err + } + return boolEvaluator, obj.Pos, nil } - return boolEvaluator, obj.Pos, nil - case ">=": - boolEvaluator, err = DurationGreaterOrEqualThan(unary, nextInt, state) - if err != nil { - return nil, obj.Pos, err + + } else { + switch *obj.ScalarComparison.Op { + case "<": + boolEvaluator, err = DurationLesserThan(unary, nextInt, state) + if err != nil { + return nil, obj.Pos, err + } + return boolEvaluator, obj.Pos, nil + case "<=": + boolEvaluator, err = DurationLesserOrEqualThan(unary, nextInt, state) + if err != nil { + return nil, obj.Pos, err + } + return boolEvaluator, obj.Pos, nil + case ">": + boolEvaluator, err = DurationGreaterThan(unary, nextInt, state) + if err != nil { + return nil, obj.Pos, err + } + return boolEvaluator, obj.Pos, nil + case ">=": + boolEvaluator, err = DurationGreaterOrEqualThan(unary, nextInt, state) + if err != nil { + return nil, obj.Pos, err + } + return boolEvaluator, 
obj.Pos, nil + case "==": + boolEvaluator, err = DurationEqual(unary, nextInt, state) + if err != nil { + return nil, obj.Pos, err + } + return boolEvaluator, obj.Pos, nil } - return boolEvaluator, obj.Pos, nil } } else { switch *obj.ScalarComparison.Op { diff --git a/pkg/security/secl/compiler/eval/eval_operators.go b/pkg/security/secl/compiler/eval/eval_operators.go index 40c54ce5261ab..9b8f3ee330b0f 100644 --- a/pkg/security/secl/compiler/eval/eval_operators.go +++ b/pkg/security/secl/compiler/eval/eval_operators.go @@ -260,6 +260,148 @@ func IntXor(a *IntEvaluator, b *IntEvaluator, state *State) (*IntEvaluator, erro }, nil } +func IntPlus(a *IntEvaluator, b *IntEvaluator, state *State) (*IntEvaluator, error) { + + isDc := isArithmDeterministic(a, b, state) + + if a.Field != "" { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if b.Field != "" { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if a.EvalFnc != nil && b.EvalFnc != nil { + ea, eb := a.EvalFnc, b.EvalFnc + + evalFnc := func(ctx *Context) int { + return ea(ctx) + eb(ctx) + } + + return &IntEvaluator{ + EvalFnc: evalFnc, + Weight: a.Weight + b.Weight, + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc == nil && b.EvalFnc == nil { + ea, eb := a.Value, b.Value + + ctx := NewContext(nil) + _ = ctx + + return &IntEvaluator{ + Value: ea + eb, + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc != nil { + ea, eb := a.EvalFnc, b.Value + + evalFnc := func(ctx *Context) int { + return ea(ctx) + eb + } + + return &IntEvaluator{ + EvalFnc: evalFnc, + Field: a.Field, + Weight: a.Weight, + isDeterministic: isDc, + }, nil + } + + ea, eb := a.Value, b.EvalFnc + + evalFnc := func(ctx *Context) int { + return ea + eb(ctx) + } + + return &IntEvaluator{ + EvalFnc: evalFnc, + Field: b.Field, + Weight: b.Weight, + isDeterministic: 
isDc, + }, nil +} + +func IntMinus(a *IntEvaluator, b *IntEvaluator, state *State) (*IntEvaluator, error) { + + isDc := isArithmDeterministic(a, b, state) + + if a.Field != "" { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if b.Field != "" { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if a.EvalFnc != nil && b.EvalFnc != nil { + ea, eb := a.EvalFnc, b.EvalFnc + + evalFnc := func(ctx *Context) int { + return ea(ctx) - eb(ctx) + } + + return &IntEvaluator{ + EvalFnc: evalFnc, + Weight: a.Weight + b.Weight, + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc == nil && b.EvalFnc == nil { + ea, eb := a.Value, b.Value + + ctx := NewContext(nil) + _ = ctx + + return &IntEvaluator{ + Value: ea - eb, + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc != nil { + ea, eb := a.EvalFnc, b.Value + + evalFnc := func(ctx *Context) int { + return ea(ctx) - eb + } + + return &IntEvaluator{ + EvalFnc: evalFnc, + Field: a.Field, + Weight: a.Weight, + isDeterministic: isDc, + }, nil + } + + ea, eb := a.Value, b.EvalFnc + + evalFnc := func(ctx *Context) int { + return ea - eb(ctx) + } + + return &IntEvaluator{ + EvalFnc: evalFnc, + Field: b.Field, + Weight: b.Weight, + isDeterministic: isDc, + }, nil +} + func BoolEquals(a *BoolEvaluator, b *BoolEvaluator, state *State) (*BoolEvaluator, error) { isDc := isArithmDeterministic(a, b, state) @@ -899,6 +1041,432 @@ func DurationGreaterOrEqualThan(a *IntEvaluator, b *IntEvaluator, state *State) }, nil } +func DurationEqual(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEvaluator, error) { + + isDc := isArithmDeterministic(a, b, state) + + if a.Field != "" { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if b.Field != "" { + if err := 
state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if a.EvalFnc != nil && b.EvalFnc != nil { + ea, eb := a.EvalFnc, b.EvalFnc + + evalFnc := func(ctx *Context) bool { + return ctx.Now().UnixNano()-int64(ea(ctx)) == int64(eb(ctx)) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Weight: a.Weight + b.Weight, + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc == nil && b.EvalFnc == nil { + ea, eb := a.Value, b.Value + + ctx := NewContext(nil) + _ = ctx + + return &BoolEvaluator{ + Value: ctx.Now().UnixNano()-int64(ea) == int64(eb), + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc != nil { + ea, eb := a.EvalFnc, b.Value + + evalFnc := func(ctx *Context) bool { + return ctx.Now().UnixNano()-int64(ea(ctx)) == int64(eb) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Field: a.Field, + Weight: a.Weight, + isDeterministic: isDc, + }, nil + } + + ea, eb := a.Value, b.EvalFnc + + evalFnc := func(ctx *Context) bool { + return ctx.Now().UnixNano()-int64(ea) == int64(eb(ctx)) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Field: b.Field, + Weight: b.Weight, + isDeterministic: isDc, + }, nil +} + +func DurationLesserThanArithmeticOperation(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEvaluator, error) { + + isDc := isArithmDeterministic(a, b, state) + + if a.Field != "" { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if b.Field != "" { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if a.EvalFnc != nil && b.EvalFnc != nil { + ea, eb := a.EvalFnc, b.EvalFnc + + evalFnc := func(ctx *Context) bool { + return int64(ea(ctx)) < int64(eb(ctx)) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Weight: a.Weight + b.Weight, + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc == nil && 
b.EvalFnc == nil { + ea, eb := a.Value, b.Value + + ctx := NewContext(nil) + _ = ctx + + return &BoolEvaluator{ + Value: int64(ea) < int64(eb), + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc != nil { + ea, eb := a.EvalFnc, b.Value + + evalFnc := func(ctx *Context) bool { + return int64(ea(ctx)) < int64(eb) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Field: a.Field, + Weight: a.Weight, + isDeterministic: isDc, + }, nil + } + + ea, eb := a.Value, b.EvalFnc + + evalFnc := func(ctx *Context) bool { + return int64(ea) < int64(eb(ctx)) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Field: b.Field, + Weight: b.Weight, + isDeterministic: isDc, + }, nil +} + +func DurationLesserOrEqualThanArithmeticOperation(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEvaluator, error) { + + isDc := isArithmDeterministic(a, b, state) + + if a.Field != "" { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if b.Field != "" { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if a.EvalFnc != nil && b.EvalFnc != nil { + ea, eb := a.EvalFnc, b.EvalFnc + + evalFnc := func(ctx *Context) bool { + return int64(ea(ctx)) <= int64(eb(ctx)) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Weight: a.Weight + b.Weight, + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc == nil && b.EvalFnc == nil { + ea, eb := a.Value, b.Value + + ctx := NewContext(nil) + _ = ctx + + return &BoolEvaluator{ + Value: int64(ea) <= int64(eb), + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc != nil { + ea, eb := a.EvalFnc, b.Value + + evalFnc := func(ctx *Context) bool { + return int64(ea(ctx)) <= int64(eb) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Field: a.Field, + Weight: a.Weight, + isDeterministic: isDc, + }, nil + } + + ea, eb := a.Value, b.EvalFnc + + evalFnc := func(ctx *Context) bool { 
+ return int64(ea) <= int64(eb(ctx)) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Field: b.Field, + Weight: b.Weight, + isDeterministic: isDc, + }, nil +} + +func DurationGreaterThanArithmeticOperation(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEvaluator, error) { + + isDc := isArithmDeterministic(a, b, state) + + if a.Field != "" { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if b.Field != "" { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if a.EvalFnc != nil && b.EvalFnc != nil { + ea, eb := a.EvalFnc, b.EvalFnc + + evalFnc := func(ctx *Context) bool { + return int64(ea(ctx)) > int64(eb(ctx)) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Weight: a.Weight + b.Weight, + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc == nil && b.EvalFnc == nil { + ea, eb := a.Value, b.Value + + ctx := NewContext(nil) + _ = ctx + + return &BoolEvaluator{ + Value: int64(ea) > int64(eb), + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc != nil { + ea, eb := a.EvalFnc, b.Value + + evalFnc := func(ctx *Context) bool { + return int64(ea(ctx)) > int64(eb) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Field: a.Field, + Weight: a.Weight, + isDeterministic: isDc, + }, nil + } + + ea, eb := a.Value, b.EvalFnc + + evalFnc := func(ctx *Context) bool { + return int64(ea) > int64(eb(ctx)) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Field: b.Field, + Weight: b.Weight, + isDeterministic: isDc, + }, nil +} + +func DurationGreaterOrEqualThanArithmeticOperation(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEvaluator, error) { + + isDc := isArithmDeterministic(a, b, state) + + if a.Field != "" { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if b.Field != "" { + if 
err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if a.EvalFnc != nil && b.EvalFnc != nil { + ea, eb := a.EvalFnc, b.EvalFnc + + evalFnc := func(ctx *Context) bool { + return int64(ea(ctx)) >= int64(eb(ctx)) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Weight: a.Weight + b.Weight, + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc == nil && b.EvalFnc == nil { + ea, eb := a.Value, b.Value + + ctx := NewContext(nil) + _ = ctx + + return &BoolEvaluator{ + Value: int64(ea) >= int64(eb), + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc != nil { + ea, eb := a.EvalFnc, b.Value + + evalFnc := func(ctx *Context) bool { + return int64(ea(ctx)) >= int64(eb) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Field: a.Field, + Weight: a.Weight, + isDeterministic: isDc, + }, nil + } + + ea, eb := a.Value, b.EvalFnc + + evalFnc := func(ctx *Context) bool { + return int64(ea) >= int64(eb(ctx)) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Field: b.Field, + Weight: b.Weight, + isDeterministic: isDc, + }, nil +} + +func DurationEqualArithmeticOperation(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEvaluator, error) { + + isDc := isArithmDeterministic(a, b, state) + + if a.Field != "" { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if b.Field != "" { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + return nil, err + } + } + + if a.EvalFnc != nil && b.EvalFnc != nil { + ea, eb := a.EvalFnc, b.EvalFnc + + evalFnc := func(ctx *Context) bool { + return int64(ea(ctx)) == int64(eb(ctx)) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Weight: a.Weight + b.Weight, + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc == nil && b.EvalFnc == nil { + ea, eb := a.Value, b.Value + + ctx := NewContext(nil) + _ = ctx + + 
return &BoolEvaluator{ + Value: int64(ea) == int64(eb), + isDeterministic: isDc, + }, nil + } + + if a.EvalFnc != nil { + ea, eb := a.EvalFnc, b.Value + + evalFnc := func(ctx *Context) bool { + return int64(ea(ctx)) == int64(eb) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Field: a.Field, + Weight: a.Weight, + isDeterministic: isDc, + }, nil + } + + ea, eb := a.Value, b.EvalFnc + + evalFnc := func(ctx *Context) bool { + return int64(ea) == int64(eb(ctx)) + } + + return &BoolEvaluator{ + EvalFnc: evalFnc, + Field: b.Field, + Weight: b.Weight, + isDeterministic: isDc, + }, nil +} + func IntArrayEquals(a *IntEvaluator, b *IntArrayEvaluator, state *State) (*BoolEvaluator, error) { isDc := isArithmDeterministic(a, b, state) diff --git a/pkg/security/secl/compiler/eval/evaluators.go b/pkg/security/secl/compiler/eval/evaluators.go index 4c6dc00fd08ac..4766363da55c2 100644 --- a/pkg/security/secl/compiler/eval/evaluators.go +++ b/pkg/security/secl/compiler/eval/evaluators.go @@ -60,8 +60,9 @@ type IntEvaluator struct { OpOverrides *OpOverrides // used during compilation of partial - isDeterministic bool - isDuration bool + isDeterministic bool + isDuration bool + isFromArithmeticOperation bool } // Eval returns the result of the evaluation diff --git a/pkg/security/secl/compiler/generators/operators/operators.go b/pkg/security/secl/compiler/generators/operators/operators.go index bba532397554c..f826fe42cb76d 100644 --- a/pkg/security/secl/compiler/generators/operators/operators.go +++ b/pkg/security/secl/compiler/generators/operators/operators.go @@ -111,7 +111,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ ctx := NewContext(nil) _ = ctx - + return &{{ .FuncReturnType }}{ Value: {{ call .Op "ea" "eb" }}, isDeterministic: isDc, @@ -273,6 +273,12 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ } } + durationCompareArithmeticOperation := func(op string) func(a string, b string) string { + return 
func(a string, b string) string { + return fmt.Sprintf("int64(%s) %s int64(%s)", a, op, b) + } + } + durationCompare := func(op string) func(a string, b string) string { return func(a string, b string) string { return fmt.Sprintf("ctx.Now().UnixNano() - int64(%s) %s int64(%s)", a, op, b) @@ -320,6 +326,24 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ Op: stdCompare("^"), ValueType: "BitmaskValueType", }, + { + FuncName: "IntPlus", + Arg1Type: "IntEvaluator", + Arg2Type: "IntEvaluator", + FuncReturnType: "IntEvaluator", + EvalReturnType: "int", + Op: stdCompare("+"), + ValueType: "ScalarValueType", + }, + { + FuncName: "IntMinus", + Arg1Type: "IntEvaluator", + Arg2Type: "IntEvaluator", + FuncReturnType: "IntEvaluator", + EvalReturnType: "int", + Op: stdCompare("-"), + ValueType: "ScalarValueType", + }, { FuncName: "BoolEquals", Arg1Type: "BoolEvaluator", @@ -401,6 +425,60 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ Op: durationCompare(">="), ValueType: "ScalarValueType", }, + { + FuncName: "DurationEqual", + Arg1Type: "IntEvaluator", + Arg2Type: "IntEvaluator", + FuncReturnType: "BoolEvaluator", + EvalReturnType: "bool", + Op: durationCompare("=="), + ValueType: "ScalarValueType", + }, + { + FuncName: "DurationLesserThanArithmeticOperation", + Arg1Type: "IntEvaluator", + Arg2Type: "IntEvaluator", + FuncReturnType: "BoolEvaluator", + EvalReturnType: "bool", + Op: durationCompareArithmeticOperation("<"), + ValueType: "ScalarValueType", + }, + { + FuncName: "DurationLesserOrEqualThanArithmeticOperation", + Arg1Type: "IntEvaluator", + Arg2Type: "IntEvaluator", + FuncReturnType: "BoolEvaluator", + EvalReturnType: "bool", + Op: durationCompareArithmeticOperation("<="), + ValueType: "ScalarValueType", + }, + { + FuncName: "DurationGreaterThanArithmeticOperation", + Arg1Type: "IntEvaluator", + Arg2Type: "IntEvaluator", + FuncReturnType: "BoolEvaluator", + EvalReturnType: "bool", + Op: 
durationCompareArithmeticOperation(">"), + ValueType: "ScalarValueType", + }, + { + FuncName: "DurationGreaterOrEqualThanArithmeticOperation", + Arg1Type: "IntEvaluator", + Arg2Type: "IntEvaluator", + FuncReturnType: "BoolEvaluator", + EvalReturnType: "bool", + Op: durationCompareArithmeticOperation(">="), + ValueType: "ScalarValueType", + }, + { + FuncName: "DurationEqualArithmeticOperation", + Arg1Type: "IntEvaluator", + Arg2Type: "IntEvaluator", + FuncReturnType: "BoolEvaluator", + EvalReturnType: "bool", + Op: durationCompareArithmeticOperation("=="), + ValueType: "ScalarValueType", + }, }, ArrayOperators: []Operator{ { diff --git a/pkg/security/secl/go.mod b/pkg/security/secl/go.mod index d90bc20260d32..365a2c711df4c 100644 --- a/pkg/security/secl/go.mod +++ b/pkg/security/secl/go.mod @@ -13,7 +13,7 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.2 github.com/skydive-project/go-debouncer v1.0.0 github.com/spf13/cast v1.5.1 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.3 golang.org/x/exp v0.0.0-20221114191408-850992195362 golang.org/x/sys v0.8.0 golang.org/x/tools v0.9.1 diff --git a/pkg/security/secl/go.sum b/pkg/security/secl/go.sum index 511c9c82d6f74..7f26045cddb5d 100644 --- a/pkg/security/secl/go.sum +++ b/pkg/security/secl/go.sum @@ -53,15 +53,11 @@ github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -108,6 +104,5 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/security/secl/model/consts_map_names.go b/pkg/security/secl/model/consts_map_names.go index 7844aef76aef2..0e6b5f84d2b03 100644 --- a/pkg/security/secl/model/consts_map_names.go +++ b/pkg/security/secl/model/consts_map_names.go @@ -28,8 +28,6 @@ var bpfMapNames = []string{ "events", "events_ringbuf_", "events_stats", - "exec_count_bb", - "exec_count_fb", "exec_file_cache", "exec_pid_transf", "fb_approver_sta", diff --git a/pkg/security/secl/model/model.go 
b/pkg/security/secl/model/model.go index 5771741a63658..be343d4ee77ed 100644 --- a/pkg/security/secl/model/model.go +++ b/pkg/security/secl/model/model.go @@ -28,6 +28,7 @@ import ( const ( // OverlayFS overlay filesystem OverlayFS = "overlay" + UnknownFS = "unknown" ) // Model describes the data model for the runtime security agent events @@ -710,6 +711,11 @@ func (e *FileEvent) GetPathResolutionError() string { return "" } +// IsOverlayFS returns whether it is an overlay fs +func (e *FileEvent) IsOverlayFS() bool { + return e.Filesystem == "overlay" +} + // InvalidateDentryEvent defines a invalidate dentry event type InvalidateDentryEvent struct { Inode uint64 @@ -933,7 +939,7 @@ type PIDContext struct { Tid uint32 `field:"tid"` // SECLDoc[tid] Definition:`Thread ID of the thread` NetNS uint32 `field:"-"` IsKworker bool `field:"is_kworker"` // SECLDoc[is_kworker] Definition:`Indicates whether the process is a kworker` - Inode uint64 `field:"-"` // used to track exec and event loss + ExecInode uint64 `field:"-"` // used to track exec and event loss } // RenameEvent represents a rename event diff --git a/pkg/security/secl/model/process_cache_entry.go b/pkg/security/secl/model/process_cache_entry.go index 2e73862f4e573..52a8490c9d217 100644 --- a/pkg/security/secl/model/process_cache_entry.go +++ b/pkg/security/secl/model/process_cache_entry.go @@ -39,28 +39,6 @@ func (pc *ProcessCacheEntry) SetAncestor(parent *ProcessCacheEntry) { parent.Retain() } -// GetNextAncestorBinary returns the first ancestor with a different binary -func (pc *ProcessContext) GetNextAncestorBinary() *ProcessCacheEntry { - current := pc - ancestor := pc.Ancestor - for ancestor != nil { - if ancestor.Inode == 0 { - return nil - } - if current.Inode != ancestor.Inode { - return ancestor - } - current = &ancestor.ProcessContext - ancestor = ancestor.Ancestor - } - return nil -} - -// GetNextAncestorBinary returns the first ancestor with a different binary -func (pc *ProcessCacheEntry) 
GetNextAncestorBinary() *ProcessCacheEntry { - return pc.ProcessContext.GetNextAncestorBinary() -} - // HasCompleteLineage returns false if, from the entry, we cannot ascend the ancestors list to PID 1 func (pc *ProcessCacheEntry) HasCompleteLineage() bool { for pc != nil { @@ -132,7 +110,17 @@ func (pc *ProcessCacheEntry) Equals(entry *ProcessCacheEntry) bool { // NewEmptyProcessCacheEntry returns an empty process cache entry for kworker events or failed process resolutions func NewEmptyProcessCacheEntry(pid uint32, tid uint32, isKworker bool) *ProcessCacheEntry { - return &ProcessCacheEntry{ProcessContext: ProcessContext{Process: Process{PIDContext: PIDContext{Pid: pid, Tid: tid, IsKworker: isKworker}}}} + entry := &ProcessCacheEntry{ProcessContext: ProcessContext{Process: Process{PIDContext: PIDContext{Pid: pid, Tid: tid, IsKworker: isKworker}}}} + + // mark file path as resolved + entry.FileEvent.SetPathnameStr("") + entry.FileEvent.SetBasenameStr("") + + // mark interpreter as resolved too + entry.LinuxBinprm.FileEvent.SetPathnameStr("") + entry.LinuxBinprm.FileEvent.SetBasenameStr("") + + return entry } // ArgsEnvs raw value for args and envs diff --git a/pkg/security/secl/model/unmarshallers.go b/pkg/security/secl/model/unmarshallers.go index 59357f2960a59..1cfdc71e402e2 100644 --- a/pkg/security/secl/model/unmarshallers.go +++ b/pkg/security/secl/model/unmarshallers.go @@ -510,7 +510,7 @@ func (p *PIDContext) UnmarshalBinary(data []byte) (int, error) { p.Tid = ByteOrder.Uint32(data[4:8]) p.NetNS = ByteOrder.Uint32(data[8:12]) p.IsKworker = ByteOrder.Uint32(data[12:16]) > 0 - p.Inode = ByteOrder.Uint64(data[16:24]) + p.ExecInode = ByteOrder.Uint64(data[16:24]) return 24, nil } diff --git a/pkg/security/secl/rules/evaluation_set.go b/pkg/security/secl/rules/evaluation_set.go index 2da4143ffec3f..3cf906f57f77e 100644 --- a/pkg/security/secl/rules/evaluation_set.go +++ b/pkg/security/secl/rules/evaluation_set.go @@ -7,6 +7,7 @@ package rules import ( "fmt" + 
"github.com/DataDog/datadog-agent/pkg/security/secl/compiler/ast" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/hashicorp/go-multierror" @@ -53,13 +54,18 @@ func (ps *EvaluationSet) GetPolicies() []*Policy { return policies } +type ruleIndexEntry struct { + value eval.RuleSetTagValue + ruleID eval.RuleID +} + func (es *EvaluationSet) LoadPolicies(loader *PolicyLoader, opts PolicyLoaderOpts) *multierror.Error { var ( errs *multierror.Error rules = make(map[eval.RuleSetTagValue][]*RuleDefinition) allMacros []*MacroDefinition macroIndex = make(map[string]*MacroDefinition) - rulesIndex = make(map[eval.RuleSetTagValue]map[eval.RuleID]*RuleDefinition) + rulesIndex = make(map[ruleIndexEntry]*RuleDefinition) ) parsingContext := ast.NewParsingContext() @@ -90,16 +96,12 @@ func (es *EvaluationSet) LoadPolicies(loader *PolicyLoader, opts PolicyLoaderOpt tagValue = DefaultRuleSetTagValue } - if _, ok := rulesIndex[tagValue]; !ok { - rulesIndex[tagValue] = make(map[string]*RuleDefinition) - } - - if existingRule := rulesIndex[tagValue][rule.ID]; existingRule != nil { + if existingRule := rulesIndex[ruleIndexEntry{tagValue, rule.ID}]; existingRule != nil { if err := existingRule.MergeWith(rule); err != nil { errs = multierror.Append(errs, err) } } else { - rulesIndex[tagValue][rule.ID] = rule + rulesIndex[ruleIndexEntry{tagValue, rule.ID}] = rule rules[tagValue] = append(rules[tagValue], rule) } } diff --git a/pkg/security/security_profile/activity_tree/activity_tree.go b/pkg/security/security_profile/activity_tree/activity_tree.go index 7b84d599efe86..33ea029611b97 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree.go +++ b/pkg/security/security_profile/activity_tree/activity_tree.go @@ -85,6 +85,7 @@ type ActivityTree struct { treeType string differentiateArgs bool + DNSMatchMaxDepth int validator ActivityTreeOwner @@ -121,6 +122,42 @@ func (at *ActivityTree) ComputeSyscallsList() []uint32 { return output } +// 
ComputeActivityTreeStats computes the initial counts of the activity tree stats +func (at *ActivityTree) ComputeActivityTreeStats() { + pnodes := at.ProcessNodes + var fnodes []*FileNode + + for len(pnodes) > 0 { + node := pnodes[0] + + at.Stats.ProcessNodes += 1 + pnodes = append(pnodes, node.Children...) + + at.Stats.DNSNodes += int64(len(node.DNSNames)) + at.Stats.SocketNodes += int64(len(node.Sockets)) + + for _, f := range node.Files { + fnodes = append(fnodes, f) + } + + pnodes = pnodes[1:] + } + + for len(fnodes) > 0 { + node := fnodes[0] + + if node.File != nil { + at.Stats.FileNodes += 1 + } + + for _, f := range node.Children { + fnodes = append(fnodes, f) + } + + fnodes = fnodes[1:] + } +} + // IsEmpty returns true if the tree is empty func (at *ActivityTree) IsEmpty() bool { return len(at.ProcessNodes) == 0 @@ -151,12 +188,6 @@ func (at *ActivityTree) DifferentiateArgs() { at.differentiateArgs = true } -// isValidRootNode evaluates if the provided process entry is allowed to become a root node of an Activity Dump -func (at *ActivityTree) isValidRootNode(entry *model.ProcessContext) bool { - // TODO: evaluate if the same issue affects other container runtimes - return !(strings.HasPrefix(entry.FileEvent.BasenameStr, "runc") || strings.HasPrefix(entry.FileEvent.BasenameStr, "containerd-shim")) -} - // isEventValid evaluates if the provided event is valid func (at *ActivityTree) isEventValid(event *model.Event, dryRun bool) (bool, error) { // check event type @@ -243,7 +274,7 @@ func (at *ActivityTree) insert(event *model.Event, dryRun bool, generationType N case model.FileOpenEventType: return node.InsertFileEvent(&event.Open.File, event, generationType, at.Stats, dryRun), nil case model.DNSEventType: - return node.InsertDNSEvent(event, generationType, at.Stats, at.DNSNames, dryRun), nil + return node.InsertDNSEvent(event, generationType, at.Stats, at.DNSNames, dryRun, at.DNSMatchMaxDepth), nil case model.BindEventType: return node.InsertBindEvent(event, 
generationType, at.Stats, dryRun), nil case model.SyscallsEventType: @@ -253,6 +284,61 @@ func (at *ActivityTree) insert(event *model.Event, dryRun bool, generationType N return false, nil } +func isContainerRuntimePrefix(basename string) bool { + return strings.HasPrefix(basename, "runc") || strings.HasPrefix(basename, "containerd-shim") +} + +// isValidRootNode evaluates if the provided process entry is allowed to become a root node of an Activity Dump +func isValidRootNode(entry *model.ProcessContext) bool { + // an ancestor is required + ancestor := GetNextAncestorBinaryOrArgv0(entry) + if ancestor == nil { + return false + } + + if entry.FileEvent.IsFileless() { + // a fileless node is a valid root node only if not having runc as parent + // ex: runc -> exec(fileless) -> init.sh; exec(fileless) is not a valid root node + return !isContainerRuntimePrefix(ancestor.FileEvent.BasenameStr) + } + + // container runtime prefixes are not valid root nodes + return !isContainerRuntimePrefix(entry.FileEvent.BasenameStr) +} + +// GetNextAncestorBinaryOrArgv0 returns the first ancestor with a different binary, or a different argv0 in the case of busybox processes +func GetNextAncestorBinaryOrArgv0(entry *model.ProcessContext) *model.ProcessCacheEntry { + if entry == nil { + return nil + } + current := entry + ancestor := entry.Ancestor + for ancestor != nil { + if ancestor.FileEvent.Inode == 0 { + return nil + } + if current.FileEvent.Inode != ancestor.FileEvent.Inode { + return ancestor + } + if process.IsBusybox(current.FileEvent.PathnameStr) && process.IsBusybox(ancestor.FileEvent.PathnameStr) { + currentArgv0, _ := process.GetProcessArgv0(¤t.Process) + if len(currentArgv0) == 0 { + return nil + } + ancestorArgv0, _ := process.GetProcessArgv0(&ancestor.Process) + if len(ancestorArgv0) == 0 { + return nil + } + if currentArgv0 != ancestorArgv0 { + return ancestor + } + } + current = &ancestor.ProcessContext + ancestor = ancestor.Ancestor + } + return nil +} + // 
CreateProcessNode finds or a create a new process activity node in the activity dump if the entry // matches the activity dump selector. func (at *ActivityTree) CreateProcessNode(entry *model.ProcessCacheEntry, generationType NodeGenerationType, dryRun bool) (node *ProcessNode, newProcessNode bool, err error) { @@ -278,7 +364,7 @@ func (at *ActivityTree) CreateProcessNode(entry *model.ProcessCacheEntry, genera // find or create a ProcessActivityNode for the parent of the input ProcessCacheEntry. If the parent is a fork entry, // jump immediately to the next ancestor. - parentNode, newProcessNode, err := at.CreateProcessNode(entry.GetNextAncestorBinary(), Snapshot, dryRun) + parentNode, newProcessNode, err := at.CreateProcessNode(GetNextAncestorBinaryOrArgv0(&entry.ProcessContext), Snapshot, dryRun) if err != nil || (newProcessNode && dryRun) { // Explanation of (newProcessNode && dryRun): when dryRun is on, we can return as soon as we // see something new in the tree. Although `newProcessNode` and `err` seem to be tied (i.e. 
newProcessNode is @@ -304,11 +390,8 @@ func (at *ActivityTree) CreateProcessNode(entry *model.ProcessCacheEntry, genera } // we're about to add a root process node, make sure this root node passes the root node sanitizer - if !at.isValidRootNode(&entry.ProcessContext) { - if !dryRun { - at.Stats.droppedCount[model.ExecEventType][invalidRootNodeReason].Inc() - } - return nil, false, fmt.Errorf("invalid root node") + if !isValidRootNode(&entry.ProcessContext) { + return nil, false, nil } // if it doesn't, create a new ProcessActivityNode for the input ProcessCacheEntry @@ -349,10 +432,10 @@ func (at *ActivityTree) CreateProcessNode(entry *model.ProcessCacheEntry, genera return node, true, nil } -func (at *ActivityTree) FindMatchingRootNodes(basename string) []*ProcessNode { +func (at *ActivityTree) FindMatchingRootNodes(arg0 string) []*ProcessNode { var res []*ProcessNode for _, node := range at.ProcessNodes { - if node.Process.FileEvent.BasenameStr == basename { + if node.Process.Argv0 == arg0 { res = append(res, node) } } diff --git a/pkg/security/security_profile/activity_tree/activity_tree_stats.go b/pkg/security/security_profile/activity_tree/activity_tree_stats.go index acee6229701cb..1d9ff55b3de97 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree_stats.go +++ b/pkg/security/security_profile/activity_tree/activity_tree_stats.go @@ -21,9 +21,9 @@ import ( // ActivityTreeStats represents the node counts in an activity dump type ActivityTreeStats struct { ProcessNodes int64 - fileNodes int64 - dnsNodes int64 - socketNodes int64 + FileNodes int64 + DNSNodes int64 + SocketNodes int64 processedCount map[model.EventType]*atomic.Uint64 addedCount map[model.EventType]map[NodeGenerationType]*atomic.Uint64 @@ -59,9 +59,9 @@ func NewActivityTreeNodeStats() *ActivityTreeStats { func (stats *ActivityTreeStats) ApproximateSize() int64 { var total int64 total += stats.ProcessNodes * int64(unsafe.Sizeof(ProcessNode{})) // 1024 - total += stats.fileNodes * 
int64(unsafe.Sizeof(FileNode{})) // 80 - total += stats.dnsNodes * int64(unsafe.Sizeof(DNSNode{})) // 24 - total += stats.socketNodes * int64(unsafe.Sizeof(SocketNode{})) // 40 + total += stats.FileNodes * int64(unsafe.Sizeof(FileNode{})) // 80 + total += stats.DNSNodes * int64(unsafe.Sizeof(DNSNode{})) // 24 + total += stats.SocketNodes * int64(unsafe.Sizeof(SocketNode{})) // 40 return total } diff --git a/pkg/security/security_profile/activity_tree/dns_node.go b/pkg/security/security_profile/activity_tree/dns_node.go index 3cb1a63e0340f..f8afa52786936 100644 --- a/pkg/security/security_profile/activity_tree/dns_node.go +++ b/pkg/security/security_profile/activity_tree/dns_node.go @@ -7,7 +7,11 @@ package activity_tree -import "github.com/DataDog/datadog-agent/pkg/security/secl/model" +import ( + "strings" + + "github.com/DataDog/datadog-agent/pkg/security/secl/model" +) // DNSNode is used to store a DNS node type DNSNode struct { @@ -25,3 +29,18 @@ func NewDNSNode(event *model.DNSEvent, rules []*model.MatchedRule, generationTyp Requests: []model.DNSEvent{*event}, } } + +func dnsFilterSubdomains(name string, maxDepth int) string { + tab := strings.Split(name, ".") + if len(tab) < maxDepth { + return name + } + result := "" + for i := 0; i < maxDepth; i++ { + if result != "" { + result = "." 
+ result + } + result = tab[len(tab)-i-1] + result + } + return result +} diff --git a/pkg/security/security_profile/activity_tree/file_node.go b/pkg/security/security_profile/activity_tree/file_node.go index a2a4a310957f3..95efb348dea0e 100644 --- a/pkg/security/security_profile/activity_tree/file_node.go +++ b/pkg/security/security_profile/activity_tree/file_node.go @@ -135,7 +135,7 @@ func (fn *FileNode) InsertFileEvent(fileEvent *model.FileEvent, event *model.Eve if len(currentPath) <= nextParentIndex+1 { if !dryRun { currentFn.Children[parent] = NewFileNode(fileEvent, event, parent, generationType) - stats.fileNodes++ + stats.FileNodes++ } break } else { diff --git a/pkg/security/security_profile/activity_tree/process_node.go b/pkg/security/security_profile/activity_tree/process_node.go index 3ce4e566706bf..171f4273e4cfb 100644 --- a/pkg/security/security_profile/activity_tree/process_node.go +++ b/pkg/security/security_profile/activity_tree/process_node.go @@ -31,7 +31,13 @@ type ProcessNode struct { } func (pn *ProcessNode) getNodeLabel(args string) string { - label := fmt.Sprintf("%s %s", pn.Process.FileEvent.PathnameStr, args) + var label string + if sprocess.IsBusybox(pn.Process.FileEvent.PathnameStr) { + arg0, _ := sprocess.GetProcessArgv0(&pn.Process) + label = fmt.Sprintf("%s %s", arg0, args) + } else { + label = fmt.Sprintf("%s %s", pn.Process.FileEvent.PathnameStr, args) + } if len(pn.Process.FileEvent.PkgName) != 0 { label += fmt.Sprintf(" \\{%s %s\\}", pn.Process.FileEvent.PkgName, pn.Process.FileEvent.PkgVersion) } @@ -76,15 +82,13 @@ func (pn *ProcessNode) debug(w io.Writer, prefix string) { // scrubAndReleaseArgsEnvs scrubs the process args and envs, and then releases them func (pn *ProcessNode) scrubAndReleaseArgsEnvs(resolver *sprocess.Resolver) { if pn.Process.ArgsEntry != nil { - _, _ = resolver.GetProcessScrubbedArgv(&pn.Process) - pn.Process.Argv0, _ = resolver.GetProcessArgv0(&pn.Process) + resolver.GetProcessScrubbedArgv(&pn.Process) + 
sprocess.GetProcessArgv0(&pn.Process) pn.Process.ArgsEntry = nil } if pn.Process.EnvsEntry != nil { - envs, envsTruncated := resolver.GetProcessEnvs(&pn.Process) - pn.Process.Envs = envs - pn.Process.EnvsTruncated = envsTruncated + resolver.GetProcessEnvs(&pn.Process) pn.Process.EnvsEntry = nil } } @@ -92,38 +96,26 @@ func (pn *ProcessNode) scrubAndReleaseArgsEnvs(resolver *sprocess.Resolver) { // Matches return true if the process fields used to generate the dump are identical with the provided model.Process func (pn *ProcessNode) Matches(entry *model.Process, matchArgs bool) bool { if pn.Process.FileEvent.PathnameStr == entry.FileEvent.PathnameStr { - if matchArgs { - var panArgs, entryArgs []string - if pn.Process.ArgsEntry != nil { - panArgs, _ = sprocess.GetProcessArgv(&pn.Process) - } else { - panArgs = pn.Process.Argv - } - if entry.ArgsEntry != nil { - entryArgs, _ = sprocess.GetProcessArgv(entry) - } else { - entryArgs = entry.Argv + if sprocess.IsBusybox(entry.FileEvent.PathnameStr) { + panArg0, _ := sprocess.GetProcessArgv0(&pn.Process) + entryArg0, _ := sprocess.GetProcessArgv0(entry) + if panArg0 != entryArg0 { + return false } + } + if matchArgs { + panArgs, _ := sprocess.GetProcessArgv(&pn.Process) + entryArgs, _ := sprocess.GetProcessArgv(entry) if len(panArgs) != len(entryArgs) { return false } - - var found bool - for _, arg1 := range panArgs { - found = false - for _, arg2 := range entryArgs { - if arg1 == arg2 { - found = true - break - } - } - if !found { + for i, arg := range panArgs { + if arg != entryArgs[i] { return false } } return true } - return true } return false @@ -173,31 +165,51 @@ func (pn *ProcessNode) InsertFileEvent(fileEvent *model.FileEvent, event *model. // this is the last child, add the fileEvent context at the leaf of the files tree. 
node := NewFileNode(fileEvent, event, parent, generationType) node.MatchedRules = model.AppendMatchedRule(node.MatchedRules, event.Rules) - stats.fileNodes++ + stats.FileNodes++ pn.Files[parent] = node } else { // This is an intermediary node in the branch that leads to the leaf we want to add. Create a node without the // fileEvent context. newChild := NewFileNode(nil, nil, parent, generationType) newChild.InsertFileEvent(fileEvent, event, fileEvent.PathnameStr[nextParentIndex:], generationType, stats, dryRun) - stats.fileNodes++ + stats.FileNodes++ pn.Files[parent] = newChild } } return true } +func (pn *ProcessNode) findDNSNode(DNSName string, DNSMatchMaxDepth int, DNSType uint16) bool { + if DNSMatchMaxDepth == 0 { + _, ok := pn.DNSNames[DNSName] + return ok + } + + toSearch := dnsFilterSubdomains(DNSName, DNSMatchMaxDepth) + for name, dnsNode := range pn.DNSNames { + if dnsFilterSubdomains(name, DNSMatchMaxDepth) == toSearch { + for _, req := range dnsNode.Requests { + if req.Type == DNSType { + return true + } + } + } + } + return false +} + // InsertDNSEvent inserts a DNS event in a process node -func (pn *ProcessNode) InsertDNSEvent(evt *model.Event, generationType NodeGenerationType, stats *ActivityTreeStats, DNSNames *utils.StringKeys, dryRun bool) bool { - if !dryRun { - DNSNames.Insert(evt.DNS.Name) +func (pn *ProcessNode) InsertDNSEvent(evt *model.Event, generationType NodeGenerationType, stats *ActivityTreeStats, DNSNames *utils.StringKeys, dryRun bool, dnsMatchMaxDepth int) bool { + if dryRun { + // Use DNSMatchMaxDepth only when searching for a node, not when trying to insert + return !pn.findDNSNode(evt.DNS.Name, dnsMatchMaxDepth, evt.DNS.Type) } - if dnsNode, ok := pn.DNSNames[evt.DNS.Name]; ok { + DNSNames.Insert(evt.DNS.Name) + dnsNode, ok := pn.DNSNames[evt.DNS.Name] + if ok { // update matched rules - if !dryRun { - dnsNode.MatchedRules = model.AppendMatchedRule(dnsNode.MatchedRules, evt.Rules) - } + dnsNode.MatchedRules = 
model.AppendMatchedRule(dnsNode.MatchedRules, evt.Rules) // look for the DNS request type for _, req := range dnsNode.Requests { @@ -206,17 +218,13 @@ func (pn *ProcessNode) InsertDNSEvent(evt *model.Event, generationType NodeGener } } - if !dryRun { - // insert the new request - dnsNode.Requests = append(dnsNode.Requests, evt.DNS) - } + // insert the new request + dnsNode.Requests = append(dnsNode.Requests, evt.DNS) return true } - if !dryRun { - pn.DNSNames[evt.DNS.Name] = NewDNSNode(&evt.DNS, evt.Rules, generationType) - stats.dnsNodes++ - } + pn.DNSNames[evt.DNS.Name] = NewDNSNode(&evt.DNS, evt.Rules, generationType) + stats.DNSNodes++ return true } @@ -238,7 +246,7 @@ func (pn *ProcessNode) InsertBindEvent(evt *model.Event, generationType NodeGene if sock == nil { sock = NewSocketNode(evtFamily, generationType) if !dryRun { - stats.socketNodes++ + stats.SocketNodes++ pn.Sockets = append(pn.Sockets, sock) } newNode = true diff --git a/pkg/security/security_profile/activity_tree/process_node_snapshot.go b/pkg/security/security_profile/activity_tree/process_node_snapshot.go index b234fd6a4867c..f90e386a9f2fd 100644 --- a/pkg/security/security_profile/activity_tree/process_node_snapshot.go +++ b/pkg/security/security_profile/activity_tree/process_node_snapshot.go @@ -18,8 +18,10 @@ import ( "syscall" "time" + legacyprocess "github.com/DataDog/gopsutil/process" "github.com/prometheus/procfs" "github.com/shirou/gopsutil/v3/process" + "golang.org/x/exp/slices" "golang.org/x/sys/unix" "github.com/DataDog/datadog-agent/pkg/process/util" @@ -75,16 +77,14 @@ func (pn *ProcessNode) snapshotFiles(p *process.Process, stats *ActivityTreeStat } // list the mmaped files of the process - memoryMaps, err := p.MemoryMaps(false) + mmapedFiles, err := snapshotMemoryMappedFiles(p.Pid, pn.Process.FileEvent.PathnameStr) if err != nil { return err } + files = append(files, mmapedFiles...) 
- for _, mm := range *memoryMaps { - if mm.Path != pn.Process.FileEvent.PathnameStr { - files = append(files, mm.Path) - } - } + // often the mmaped files are already nearly sorted, so we take the quick win and de-duplicate without sorting + files = slices.Compact(files) // insert files var fileinfo os.FileInfo @@ -109,12 +109,12 @@ func (pn *ProcessNode) snapshotFiles(p *process.Process, stats *ActivityTreeStat evt.Type = uint32(model.FileOpenEventType) resolvedPath, err = filepath.EvalSymlinks(f) - if err != nil { - evt.Open.File.PathnameStr = resolvedPath + if err == nil && len(resolvedPath) != 0 { + evt.Open.File.SetPathnameStr(resolvedPath) } else { - evt.Open.File.PathnameStr = f + evt.Open.File.SetPathnameStr(f) } - evt.Open.File.BasenameStr = path.Base(evt.Open.File.PathnameStr) + evt.Open.File.SetBasenameStr(path.Base(evt.Open.File.PathnameStr)) evt.Open.File.FileFields.Mode = uint16(stat.Mode) evt.Open.File.FileFields.Inode = stat.Ino evt.Open.File.FileFields.UID = stat.Uid @@ -128,6 +128,34 @@ func (pn *ProcessNode) snapshotFiles(p *process.Process, stats *ActivityTreeStat return nil } +const MAX_MMAPED_FILES = 128 + +func snapshotMemoryMappedFiles(pid int32, processEventPath string) ([]string, error) { + fakeprocess := legacyprocess.Process{Pid: pid} + stats, err := fakeprocess.MemoryMaps(false) + if err != nil || stats == nil { + return nil, err + } + + files := make([]string, 0, MAX_MMAPED_FILES) + for _, mm := range *stats { + if len(files) >= MAX_MMAPED_FILES { + break + } + + if len(mm.Path) == 0 { + continue + } + + if mm.Path != processEventPath { + continue + } + + files = append(files, mm.Path) + } + return files, nil +} + func (pn *ProcessNode) snapshotBoundSockets(p *process.Process, stats *ActivityTreeStats, newEvent func() *model.Event) error { // list all the file descriptors opened by the process FDs, err := p.OpenFiles() diff --git a/pkg/security/security_profile/dump/activity_dump.go b/pkg/security/security_profile/dump/activity_dump.go 
index 57884d5c5dfd9..732c4dc0e2b31 100644 --- a/pkg/security/security_profile/dump/activity_dump.go +++ b/pkg/security/security_profile/dump/activity_dump.go @@ -605,7 +605,7 @@ func (ad *ActivityDump) ToSecurityActivityDumpMessage() *api.ActivityDumpMessage Service: ad.Service, Tags: ad.Tags, Storage: storage, - Metadata: &api.ActivityDumpMetadataMessage{ + Metadata: &api.MetadataMessage{ AgentVersion: ad.Metadata.AgentVersion, AgentCommit: ad.Metadata.AgentCommit, KernelVersion: ad.Metadata.KernelVersion, diff --git a/pkg/security/security_profile/dump/load_controller.go b/pkg/security/security_profile/dump/load_controller.go index 603ce4dcb357b..e338ef2b84107 100644 --- a/pkg/security/security_profile/dump/load_controller.go +++ b/pkg/security/security_profile/dump/load_controller.go @@ -22,13 +22,14 @@ import ( var ( // TracedEventTypesReductionOrder is the order by which event types are reduced TracedEventTypesReductionOrder = []model.EventType{model.BindEventType, model.DNSEventType, model.SyscallsEventType, model.FileOpenEventType} - // MinDumpTimeout is the shortest timeout for a dump - MinDumpTimeout = 10 * time.Minute + + absoluteMinimumDumpTimeout = 10 * time.Second ) // ActivityDumpLoadController is a load controller allowing dynamic change of Activity Dump configuration type ActivityDumpLoadController struct { - adm *ActivityDumpManager + adm *ActivityDumpManager + minDumpTimeout time.Duration // eBPF maps activityDumpConfigDefaults *ebpf.Map @@ -44,9 +45,15 @@ func NewActivityDumpLoadController(adm *ActivityDumpManager) (*ActivityDumpLoadC return nil, fmt.Errorf("couldn't find activity_dump_config_defaults map") } + minDumpTimeout := adm.config.RuntimeSecurity.ActivityDumpLoadControlMinDumpTimeout + if minDumpTimeout < absoluteMinimumDumpTimeout { + minDumpTimeout = absoluteMinimumDumpTimeout + } + return &ActivityDumpLoadController{ activityDumpConfigDefaults: activityDumpConfigDefaultsMap, adm: adm, + minDumpTimeout: minDumpTimeout, }, nil } @@ 
-99,19 +106,19 @@ func (lc *ActivityDumpLoadController) NextPartialDump(ad *ActivityDump) *Activit newDump.LoadConfig.Rate = ad.LoadConfig.Rate newDump.LoadConfigCookie = ad.LoadConfigCookie - if timeToThreshold < MinDumpTimeout { + if timeToThreshold < lc.minDumpTimeout { if err := lc.reduceDumpRate(ad, newDump); err != nil { seclog.Errorf("%v", err) } } - if timeToThreshold < MinDumpTimeout/2 && ad.LoadConfig.Timeout > MinDumpTimeout { + if timeToThreshold < lc.minDumpTimeout/2 && ad.LoadConfig.Timeout > lc.minDumpTimeout { if err := lc.reduceDumpTimeout(newDump); err != nil { seclog.Errorf("%v", err) } } - if timeToThreshold < MinDumpTimeout/4 { + if timeToThreshold < lc.minDumpTimeout/4 { if err := lc.reduceTracedEventTypes(ad, newDump); err != nil { seclog.Errorf("%v", err) } @@ -144,10 +151,9 @@ reductionOrder: } for _, evt := range old.LoadConfig.TracedEventTypes { - if evt == evtToRemove { - continue + if evt != evtToRemove { + new.LoadConfig.TracedEventTypes = append(new.LoadConfig.TracedEventTypes, evt) } - new.LoadConfig.TracedEventTypes = append(new.LoadConfig.TracedEventTypes, evt) } // send metric @@ -162,8 +168,8 @@ reductionOrder: // reduceDumpTimeout reduces the dump timeout configuration func (lc *ActivityDumpLoadController) reduceDumpTimeout(new *ActivityDump) error { newTimeout := new.LoadConfig.Timeout * 3 / 4 // reduce by 25% - if newTimeout < MinDumpTimeout { - newTimeout = MinDumpTimeout + if minTimeout := lc.adm.config.RuntimeSecurity.ActivityDumpLoadControlMinDumpTimeout; newTimeout < minTimeout { + newTimeout = minTimeout } new.SetTimeout(newTimeout) diff --git a/pkg/security/security_profile/dump/manager.go b/pkg/security/security_profile/dump/manager.go index fba1cca8e0bef..f81d39f11f8b7 100644 --- a/pkg/security/security_profile/dump/manager.go +++ b/pkg/security/security_profile/dump/manager.go @@ -415,7 +415,8 @@ func (adm *ActivityDumpManager) DumpActivity(params *api.ActivityDumpParams) (*a newDump := NewActivityDump(adm, func(ad 
*ActivityDump) { ad.Metadata.Comm = params.GetComm() ad.Metadata.ContainerID = params.GetContainerID() - ad.SetTimeout(time.Duration(params.Timeout) * time.Minute) + dumpDuration, _ := time.ParseDuration(params.Timeout) + ad.SetTimeout(dumpDuration) if params.GetDifferentiateArgs() { ad.Metadata.DifferentiateArgs = true @@ -548,10 +549,10 @@ func (adm *ActivityDumpManager) SearchTracedProcessCacheEntryCallback(ad *Activi // compute the list of ancestors, we need to start inserting them from the root ancestors := []*model.ProcessCacheEntry{entry} - parent := entry.GetNextAncestorBinary() + parent := activity_tree.GetNextAncestorBinaryOrArgv0(&entry.ProcessContext) for parent != nil { ancestors = append([]*model.ProcessCacheEntry{parent}, ancestors...) - parent = parent.GetNextAncestorBinary() + parent = activity_tree.GetNextAncestorBinaryOrArgv0(&parent.ProcessContext) } for _, parent = range ancestors { diff --git a/pkg/security/security_profile/dump/storage_manager.go b/pkg/security/security_profile/dump/storage_manager.go index b561d4f898d8d..811ba78c413e4 100644 --- a/pkg/security/security_profile/dump/storage_manager.go +++ b/pkg/security/security_profile/dump/storage_manager.go @@ -122,9 +122,13 @@ func (manager *ActivityDumpStorageManager) PersistRaw(requests []config.StorageR if manager.statsdClient != nil { if size := len(raw.Bytes()); size > 0 { tags := []string{"format:" + request.Format.String(), "storage_type:" + request.Type.String(), fmt.Sprintf("compression:%v", request.Compression)} - if err := manager.statsdClient.Gauge(metrics.MetricActivityDumpSizeInBytes, float64(size), tags, 1.0); err != nil { + if err := manager.statsdClient.Count(metrics.MetricActivityDumpSizeInBytes, int64(size), tags, 1.0); err != nil { seclog.Warnf("couldn't send %s metric: %v", metrics.MetricActivityDumpSizeInBytes, err) } + + if err := manager.statsdClient.Count(metrics.MetricActivityDumpPersistedDumps, 1, tags, 1.0); err != nil { + seclog.Warnf("couldn't send %s metric: 
%v", metrics.MetricActivityDumpPersistedDumps, err) + } } } } diff --git a/pkg/security/security_profile/profile/manager.go b/pkg/security/security_profile/profile/manager.go index 02b2b21c64bc2..ebc051a17d262 100644 --- a/pkg/security/security_profile/profile/manager.go +++ b/pkg/security/security_profile/profile/manager.go @@ -9,8 +9,8 @@ package profile import ( "context" - "errors" "fmt" + "os" "sync" "time" @@ -24,6 +24,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/metrics" + "github.com/DataDog/datadog-agent/pkg/security/proto/api" "github.com/DataDog/datadog-agent/pkg/security/rconfig" "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" @@ -34,46 +35,82 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/utils" ) -// EventFilteringResult is used to compute metrics for the event filtering feature -type EventFilteringResult uint8 +// DefaultProfileName used as default profile name +const DefaultProfileName = "default" + +// EventFilteringProfileState is used to compute metrics for the event filtering feature +type EventFilteringProfileState uint8 const ( // NoProfile is used to count the events for which we didn't have a profile - NoProfile EventFilteringResult = iota - // InProfile is used to count the events that matched a profile - InProfile - // NotInProfile is used to count the events that didn't match their profile - NotInProfile + NoProfile EventFilteringProfileState = iota // UnstableProfile is used to count the events that didn't make it into a profile because their matching profile was // unstable UnstableProfile - // WorkloadWarmup is used to count the unmatched events with a profile skipped due to workload warm up time + // UnstableEventType is used to count the events that didn't make it into a profile because their matching profile was + // unstable for their event type + 
UnstableEventType + // StableProfile is used to count the events linked to a stable profile + StableProfile + // AutoLearning is used to count the event during the auto learning phase + AutoLearning + // WorkloadWarmup is used to count the learned events due to workload warm up time WorkloadWarmup ) -// DefaultProfileName used as default profile name -const DefaultProfileName = "default" +func (efr EventFilteringProfileState) toTag() string { + switch efr { + case NoProfile: + return "profile_state:no_profile" + case UnstableProfile: + return "profile_state:unstable_profile" + case UnstableEventType: + return "profile_state:unstable_event_type" + case StableProfile: + return "profile_state:stable_profile" + case AutoLearning: + return "profile_state:auto_learning" + case WorkloadWarmup: + return "profile_state:workload_warmup" + } + return "" +} + +// EventFilteringResult is used to compute metrics for the event filtering feature +type EventFilteringResult uint8 + +const ( + // Not applicable, for the NoProfile and UnstableProfile states + NA EventFilteringResult = iota + // InProfile is used to count the events that matched a profile + InProfile + // NotInProfile is used to count the events that didn't match their profile + NotInProfile +) func (efr EventFilteringResult) toTag() string { switch efr { - case NoProfile: - return fmt.Sprintf("in_profile:no_profile") + case NA: + return "" case InProfile: - return fmt.Sprintf("in_profile:true") + return "in_profile:true" case NotInProfile: - return fmt.Sprintf("in_profile:false") - case UnstableProfile: - return fmt.Sprintf("in_profile:unstable_profile") + return "in_profile:false" } return "" } var ( - allEventFilteringResults = []EventFilteringResult{NoProfile, InProfile, NotInProfile, UnstableProfile} - errUnstableProfileSizeLimitReached = errors.New("unstable profile: size limit reached") - errUnstableProfileTimeLimitReached = errors.New("unstable profile: time limit reached") + allEventFilteringProfileState =
[]EventFilteringProfileState{NoProfile, UnstableProfile, UnstableEventType, StableProfile, AutoLearning, WorkloadWarmup} + allEventFilteringResults = []EventFilteringResult{InProfile, NotInProfile, NA} ) +type eventFilteringEntry struct { + eventType model.EventType + state EventFilteringProfileState + result EventFilteringResult +} + // SecurityProfileManager is used to manage Security Profiles type SecurityProfileManager struct { config *config.Config @@ -94,7 +131,7 @@ type SecurityProfileManager struct { cacheHit *atomic.Uint64 cacheMiss *atomic.Uint64 - eventFiltering map[model.EventType]map[EventFilteringResult]*atomic.Uint64 + eventFiltering map[eventFilteringEntry]*atomic.Uint64 } // NewSecurityProfileManager returns a new instance of SecurityProfileManager @@ -147,12 +184,18 @@ func NewSecurityProfileManager(config *config.Config, statsdClient statsd.Client pendingCache: profileCache, cacheHit: atomic.NewUint64(0), cacheMiss: atomic.NewUint64(0), - eventFiltering: make(map[model.EventType]map[EventFilteringResult]*atomic.Uint64), + eventFiltering: make(map[eventFilteringEntry]*atomic.Uint64), } + for i := model.EventType(0); i < model.MaxKernelEventType; i++ { - m.eventFiltering[i] = make(map[EventFilteringResult]*atomic.Uint64) - for _, result := range allEventFilteringResults { - m.eventFiltering[i][result] = atomic.NewUint64(0) + for _, state := range allEventFilteringProfileState { + for _, result := range allEventFilteringResults { + m.eventFiltering[eventFilteringEntry{ + eventType: i, + state: state, + result: result, + }] = atomic.NewUint64(0) + } } } @@ -215,6 +258,8 @@ func (m *SecurityProfileManager) OnWorkloadSelectorResolvedEvent(workload *cgrou defer m.pendingCacheLock.Unlock() profile, ok = m.pendingCache.Get(workload.WorkloadSelector) if ok { + m.cacheHit.Inc() + // remove profile from cache _ = m.pendingCache.Remove(workload.WorkloadSelector) @@ -232,6 +277,8 @@ func (m *SecurityProfileManager) OnWorkloadSelectorResolvedEvent(workload 
*cgrou // insert the profile in the list of active profiles m.profiles[workload.WorkloadSelector] = profile } else { + m.cacheMiss.Inc() + // create a new entry profile = NewSecurityProfile(workload.WorkloadSelector, m.config.RuntimeSecurity.AnomalyDetectionEventTypes) m.profiles[workload.WorkloadSelector] = profile @@ -373,7 +420,7 @@ func (m *SecurityProfileManager) ShouldDeleteProfile(profile *SecurityProfile) { // cleanup profile before insertion in cache profile.reset() - if profile.selector.IsEmpty() { + if profile.selector.IsReady() { // do not insert in cache return } @@ -407,6 +454,10 @@ func (m *SecurityProfileManager) OnNewProfileEvent(selector cgroupModel.Workload // decode the content of the profile ProtoToSecurityProfile(profile, newProfile) + profile.ActivityTree.DNSMatchMaxDepth = m.config.RuntimeSecurity.SecurityProfileDNSMatchMaxDepth + + // compute activity tree initial stats + profile.ActivityTree.ComputeActivityTreeStats() // prepare the profile for insertion m.prepareProfile(profile) @@ -440,22 +491,40 @@ func (m *SecurityProfileManager) stop() { } } +func (m *SecurityProfileManager) incrementEventFilteringStat(eventType model.EventType, state EventFilteringProfileState, result EventFilteringResult) { + m.eventFiltering[eventFilteringEntry{eventType, state, result}].Inc() +} + // SendStats sends metrics about the Security Profile manager func (m *SecurityProfileManager) SendStats() error { m.profilesLock.Lock() defer m.profilesLock.Unlock() - if val := float64(len(m.profiles)); val > 0 { - if err := m.statsdClient.Gauge(metrics.MetricSecurityProfileActiveProfiles, val, []string{}, 1.0); err != nil { - return fmt.Errorf("couldn't send MetricSecurityProfileActiveProfiles: %w", err) - } - } + profileStats := make(map[model.Status]map[bool]float64) for _, profile := range m.profiles { if profile.loadedInKernel { // make sure the profile is loaded if err := profile.SendStats(m.statsdClient); err != nil { return fmt.Errorf("couldn't send metrics for 
[%s]: %w", profile.selector.String(), err) } } + if profileStats[profile.Status] == nil { + profileStats[profile.Status] = make(map[bool]float64) + } + profileStats[profile.Status][profile.loadedInKernel] += 1 + } + + for status, counts := range profileStats { + for inKernel, count := range counts { + tags := []string{ + fmt.Sprintf("in_kernel:%v", inKernel), + fmt.Sprintf("anomaly_detection:%v", status.IsEnabled(model.AnomalyDetection)), + fmt.Sprintf("auto_suppression:%v", status.IsEnabled(model.AutoSuppression)), + fmt.Sprintf("workload_hardening:%v", status.IsEnabled(model.WorkloadHardening)), + } + if err := m.statsdClient.Gauge(metrics.MetricSecurityProfileProfiles, count, tags, 1.0); err != nil { + return fmt.Errorf("couldn't send MetricSecurityProfileProfiles: %w", err) + } + } } m.pendingCacheLock.Lock() @@ -478,13 +547,11 @@ func (m *SecurityProfileManager) SendStats() error { } } - for evtType, filteringCounts := range m.eventFiltering { - for result, count := range filteringCounts { - tags := []string{fmt.Sprintf("event_type:%s", evtType), result.toTag()} - if value := count.Swap(0); value > 0 { - if err := m.statsdClient.Count(metrics.MetricSecurityProfileEventFiltering, int64(value), tags, 1.0); err != nil { - return fmt.Errorf("couldn't send MetricSecurityProfileEventFiltering metric: %w", err) - } + for entry, count := range m.eventFiltering { + tags := []string{fmt.Sprintf("event_type:%s", entry.eventType), entry.state.toTag(), entry.result.toTag()} + if value := count.Swap(0); value > 0 { + if err := m.statsdClient.Count(metrics.MetricSecurityProfileEventFiltering, int64(value), tags, 1.0); err != nil { + return fmt.Errorf("couldn't send MetricSecurityProfileEventFiltering metric: %w", err) } } } @@ -575,57 +642,49 @@ func (m *SecurityProfileManager) LookupEventInProfiles(event *model.Event) { // lookup profile profile := m.GetProfile(selector) if profile == nil || profile.Status == 0 { - m.eventFiltering[event.GetEventType()][NoProfile].Inc() + 
m.incrementEventFilteringStat(event.GetEventType(), NoProfile, NA) return } _ = event.FieldHandlers.ResolveContainerCreatedAt(event, event.ContainerContext) - markEventAsInProfile := func(inProfile bool) { - // link the profile to the event only if it's a valid event for profile without any error + // check if the event should be injected in the profile automatically + profileState := m.tryAutolearn(profile, event) + switch profileState { + case UnstableProfile, NoProfile: // an error occurred + return + case AutoLearning, WorkloadWarmup: + // the event was either already in the profile, or has just been inserted FillProfileContextFromProfile(&event.SecurityProfileContext, profile) - - if inProfile { + event.AddToFlags(model.EventFlagsSecurityProfileInProfile) + return + case StableProfile: + // check if the event is in its profile + found, err := profile.ActivityTree.Contains(event, activity_tree.ProfileDrift) + if err != nil { + // ignore, evaluation failed + m.incrementEventFilteringStat(event.GetEventType(), NoProfile, NA) + return + } + FillProfileContextFromProfile(&event.SecurityProfileContext, profile) + if found { event.AddToFlags(model.EventFlagsSecurityProfileInProfile) - m.eventFiltering[event.GetEventType()][InProfile].Inc() + m.incrementEventFilteringStat(event.GetEventType(), profileState, InProfile) } else { - m.eventFiltering[event.GetEventType()][NotInProfile].Inc() + m.incrementEventFilteringStat(event.GetEventType(), profileState, NotInProfile) } } - - // check if the event should be injected in the profile automatically - if autoLearned, err := m.tryAutolearn(profile, event); err != nil { - return - } else if autoLearned { - markEventAsInProfile(true) - return - } - - // check if the event is in its profile - found, err := profile.ActivityTree.Contains(event, activity_tree.ProfileDrift) - if err != nil { - // ignore, evaluation failed - m.eventFiltering[event.GetEventType()][NoProfile].Inc() - return - } - - markEventAsInProfile(found) } -// 
tryAutolearn tries to autolearn the input event. The first return values is true if the event was autolearned, -// in which case the second return value tells whether the node was already in the profile. -func (m *SecurityProfileManager) tryAutolearn(profile *SecurityProfile, event *model.Event) (bool, error) { - // check if the unstable size limit was reached - if profile.ActivityTree.Stats.ApproximateSize() >= m.config.RuntimeSecurity.AnomalyDetectionUnstableProfileSizeThreshold { - m.eventFiltering[event.GetEventType()][UnstableProfile].Inc() - return false, errUnstableProfileSizeLimitReached - } - +// tryAutolearn tries to autolearn the input event. It returns the profile state: stable, unstable, autolearning or workloadwarmup +func (m *SecurityProfileManager) tryAutolearn(profile *SecurityProfile, event *model.Event) EventFilteringProfileState { var nodeType activity_tree.NodeGenerationType + var profileState EventFilteringProfileState // check if we are at the beginning of a workload lifetime if event.ResolveEventTime().Sub(time.Unix(0, int64(event.ContainerContext.CreatedAt))) < m.config.RuntimeSecurity.AnomalyDetectionWorkloadWarmupPeriod { nodeType = activity_tree.WorkloadWarmup + profileState = WorkloadWarmup } else { // have we reached the stable state time limit ? lastAnomalyNano, ok := profile.lastAnomalyNano[event.GetEventType()] @@ -634,31 +693,109 @@ func (m *SecurityProfileManager) tryAutolearn(profile *SecurityProfile, event *m lastAnomalyNano = profile.loadedNano } if time.Duration(event.TimestampRaw-lastAnomalyNano) >= m.config.RuntimeSecurity.AnomalyDetectionMinimumStablePeriod { - return false, nil + return StableProfile } // have we reached the unstable time limit ? 
if time.Duration(event.TimestampRaw-profile.loadedNano) >= m.config.RuntimeSecurity.AnomalyDetectionUnstableProfileTimeThreshold { - m.eventFiltering[event.GetEventType()][UnstableProfile].Inc() - return false, errUnstableProfileTimeLimitReached + m.incrementEventFilteringStat(event.GetEventType(), UnstableEventType, NA) + return UnstableEventType } nodeType = activity_tree.ProfileDrift + profileState = AutoLearning + } + + // here we are either in AutoLearning or WorkloadWarmup + + // check if the unstable size limit was reached + if profile.ActivityTree.Stats.ApproximateSize() >= m.config.RuntimeSecurity.AnomalyDetectionUnstableProfileSizeThreshold { + m.incrementEventFilteringStat(event.GetEventType(), UnstableProfile, NA) + return UnstableProfile } // try to insert the event in the profile newEntry, err := profile.ActivityTree.Insert(event, nodeType) if err != nil { - m.eventFiltering[event.GetEventType()][NoProfile].Inc() - return false, err + m.incrementEventFilteringStat(event.GetEventType(), NoProfile, NA) + return NoProfile + } else if newEntry { + profile.lastAnomalyNano[event.GetEventType()] = event.TimestampRaw + m.incrementEventFilteringStat(event.GetEventType(), profileState, NotInProfile) + } else { // no newEntry + m.incrementEventFilteringStat(event.GetEventType(), profileState, InProfile) } + return profileState +} - // the event was either already in the profile, or has just been inserted - event.AddToFlags(model.EventFlagsSecurityProfileInProfile) +// ListSecurityProfiles returns the list of security profiles +func (m *SecurityProfileManager) ListSecurityProfiles(params *api.SecurityProfileListParams) (*api.SecurityProfileListMessage, error) { + var out api.SecurityProfileListMessage - if newEntry { - profile.lastAnomalyNano[event.GetEventType()] = event.TimestampRaw + m.profilesLock.Lock() + defer m.profilesLock.Unlock() + + for _, p := range m.profiles { + msg := p.ToSecurityProfileMessage(m.timeResolver, 
m.config.RuntimeSecurity.AnomalyDetectionMinimumStablePeriod) + out.Profiles = append(out.Profiles, msg) + } + + if params.GetIncludeCache() { + m.pendingCacheLock.Lock() + defer m.pendingCacheLock.Unlock() + for _, k := range m.pendingCache.Keys() { + p, ok := m.pendingCache.Peek(k) + if !ok { + continue + } + msg := p.ToSecurityProfileMessage(m.timeResolver, m.config.RuntimeSecurity.AnomalyDetectionMinimumStablePeriod) + out.Profiles = append(out.Profiles, msg) + } + } + return &out, nil +} + +// SaveSecurityProfile saves the requested security profile to disk +func (m *SecurityProfileManager) SaveSecurityProfile(params *api.SecurityProfileSaveParams) (*api.SecurityProfileSaveMessage, error) { + selector, err := cgroupModel.NewWorkloadSelector(params.GetSelector().GetName(), params.GetSelector().GetTag()) + if err != nil { + return &api.SecurityProfileSaveMessage{ + Error: err.Error(), + }, nil + } + + p := m.GetProfile(selector) + if p == nil { + return &api.SecurityProfileSaveMessage{ + Error: fmt.Sprintf("security profile not found"), + }, nil + } + + // encode profile + psp := SecurityProfileToProto(p) + if psp == nil { + return &api.SecurityProfileSaveMessage{ + Error: fmt.Sprintf("security profile not found"), + }, nil + } + + raw, err := psp.MarshalVT() + if err != nil { + return nil, fmt.Errorf("couldn't encode security profile in %s: %v", config.Protobuf, err) + } + + // write profile to encoded profile to disk + f, err := os.CreateTemp("/tmp", fmt.Sprintf("%s-*.profile", p.Metadata.Name)) + if err != nil { + return nil, fmt.Errorf("couldn't create temporary file: %w", err) + } + defer f.Close() + + if _, err = f.Write(raw); err != nil { + return nil, fmt.Errorf("couldn't write to temporary file: %w", err) } - return true, nil + return &api.SecurityProfileSaveMessage{ + File: f.Name(), + }, nil } diff --git a/pkg/security/security_profile/profile/profile.go b/pkg/security/security_profile/profile/profile.go index 9a3667c88603b..3bd26b41c0116 100644 --- 
a/pkg/security/security_profile/profile/profile.go +++ b/pkg/security/security_profile/profile/profile.go @@ -12,13 +12,16 @@ import ( "io" "os" "sync" + "time" "golang.org/x/exp/slices" proto "github.com/DataDog/agent-payload/v5/cws/dumpsv1" "github.com/DataDog/datadog-go/v5/statsd" + "github.com/DataDog/datadog-agent/pkg/security/proto/api" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" + timeResolver "github.com/DataDog/datadog-agent/pkg/security/resolvers/time" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree" mtdt "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree/metadata" @@ -74,6 +77,9 @@ func NewSecurityProfile(selector cgroupModel.WorkloadSelector, anomalyDetectionE // reset empties all internal fields so that this profile can be used again in the future func (p *SecurityProfile) reset() { p.loadedInKernel = false + p.loadedNano = 0 + p.profileCookie = 0 + p.lastAnomalyNano = make(map[model.EventType]uint64) p.Instances = nil } @@ -155,3 +161,52 @@ func (profile *SecurityProfile) SendStats(client statsd.ClientInterface) error { defer profile.Unlock() return profile.ActivityTree.SendStats(client) } + +// ToSecurityProfileMessage returns a SecurityProfileMessage filled with the content of the current Security Profile +func (p *SecurityProfile) ToSecurityProfileMessage(timeResolver *timeResolver.Resolver, minimumStablePeriod time.Duration) *api.SecurityProfileMessage { + msg := &api.SecurityProfileMessage{ + LoadedInKernel: p.loadedInKernel, + LoadedInKernelTimestamp: timeResolver.ResolveMonotonicTimestamp(p.loadedNano).String(), + Selector: &api.WorkloadSelectorMessage{ + Name: p.selector.Image, + Tag: p.selector.Tag, + }, + ProfileCookie: p.profileCookie, + Status: p.Status.String(), + Version: p.Version, + Metadata: &api.MetadataMessage{ + Name: p.Metadata.Name, + }, + Tags: p.Tags, + } + if p.ActivityTree 
!= nil { + msg.Stats = &api.ActivityTreeStatsMessage{ + ProcessNodesCount: p.ActivityTree.Stats.ProcessNodes, + FileNodesCount: p.ActivityTree.Stats.FileNodes, + DNSNodesCount: p.ActivityTree.Stats.DNSNodes, + SocketNodesCount: p.ActivityTree.Stats.SocketNodes, + ApproximateSize: p.ActivityTree.Stats.ApproximateSize(), + } + } + + for _, evt := range p.anomalyDetectionEvents { + msg.AnomalyDetectionEvents = append(msg.AnomalyDetectionEvents, evt.String()) + } + + for evt, ts := range p.lastAnomalyNano { + lastAnomaly := timeResolver.ResolveMonotonicTimestamp(ts) + msg.LastAnomalies = append(msg.LastAnomalies, &api.LastAnomalyTimestampMessage{ + EventType: evt.String(), + Timestamp: lastAnomaly.String(), + IsStableEventType: time.Now().Sub(lastAnomaly) >= minimumStablePeriod, + }) + } + + for _, inst := range p.Instances { + msg.Instances = append(msg.Instances, &api.InstanceMessage{ + ContainerID: inst.ID, + Tags: inst.Tags, + }) + } + return msg +} diff --git a/pkg/security/security_profile/profile/profile_dir.go b/pkg/security/security_profile/profile/profile_dir.go index 0dc426ee2de52..075057f94b5d7 100644 --- a/pkg/security/security_profile/profile/profile_dir.go +++ b/pkg/security/security_profile/profile/profile_dir.go @@ -21,6 +21,7 @@ import ( "golang.org/x/exp/slices" proto "github.com/DataDog/agent-payload/v5/cws/dumpsv1" + cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/seclog" @@ -99,7 +100,7 @@ func (dp *DirectoryProvider) Start(ctx context.Context) error { if dp.watcherEnabled { var err error if dp.watcher, err = fsnotify.NewWatcher(); err != nil { - return err + return fmt.Errorf("couldn't set up inotify watcher: %w", err) } if err = dp.watcher.Add(dp.directory); err != nil { diff --git a/pkg/security/security_profile/profile/profile_proto_dec_v1.go b/pkg/security/security_profile/profile/profile_proto_dec_v1.go
index 5ac3d820b6440..094dbc65df63f 100644 --- a/pkg/security/security_profile/profile/profile_proto_dec_v1.go +++ b/pkg/security/security_profile/profile/profile_proto_dec_v1.go @@ -15,6 +15,7 @@ import ( mtdt "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree/metadata" ) +// ProtoToSecurityProfile decodes a Security Profile from its protobuf representation func ProtoToSecurityProfile(output *SecurityProfile, input *proto.SecurityProfile) { if input == nil { return diff --git a/pkg/security/security_profile/profile/profile_proto_enc_v1.go b/pkg/security/security_profile/profile/profile_proto_enc_v1.go new file mode 100644 index 0000000000000..4dc35a5864401 --- /dev/null +++ b/pkg/security/security_profile/profile/profile_proto_enc_v1.go @@ -0,0 +1,35 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux +// +build linux + +package profile + +import ( + proto "github.com/DataDog/agent-payload/v5/cws/dumpsv1" + + "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree" + mtdt "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree/metadata" +) + +// SecurityProfileToProto encodes a Security Profile to its protobuf representation +func SecurityProfileToProto(input *SecurityProfile) *proto.SecurityProfile { + if input == nil { + return nil + } + + output := proto.SecurityProfile{ + Status: uint32(input.Status), + Version: input.Version, + Metadata: mtdt.MetadataToProto(&input.Metadata), + Syscalls: input.Syscalls, + Tags: make([]string, len(input.Tags)), + Tree: activity_tree.ActivityTreeToProto(input.ActivityTree), + } + copy(output.Tags, input.Tags) + + return &output +} diff --git a/pkg/security/serializers/serializers.go b/pkg/security/serializers/serializers.go index b53d068933bb9..fec9ffb8d5830 100644 --- a/pkg/security/serializers/serializers.go +++ b/pkg/security/serializers/serializers.go @@ -21,6 +21,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/events" "github.com/DataDog/datadog-agent/pkg/security/resolvers" + sprocess "github.com/DataDog/datadog-agent/pkg/security/resolvers/process" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/utils" @@ -667,7 +668,7 @@ func newProcessSerializer(ps *model.Process, e *model.Event, resolvers *resolver if ps.IsNotKworker() { argv, argvTruncated := resolvers.ProcessResolver.GetProcessScrubbedArgv(ps) envs, EnvsTruncated := resolvers.ProcessResolver.GetProcessEnvs(ps) - argv0, _ := resolvers.ProcessResolver.GetProcessArgv0(ps) + argv0, _ := sprocess.GetProcessArgv0(ps) psSerializer := &ProcessSerializer{ ForkTime: getTimeIfNotZero(ps.ForkTime), diff --git a/pkg/security/tests/activity_dumps_common.go
b/pkg/security/tests/activity_dumps_common.go index 6db07c9a971df..bbb56aa3a2dfb 100644 --- a/pkg/security/tests/activity_dumps_common.go +++ b/pkg/security/tests/activity_dumps_common.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build functionaltests +//go:build functionaltests || stresstests package tests @@ -12,17 +12,23 @@ import ( "os" "path/filepath" "testing" + "time" "github.com/DataDog/datadog-agent/pkg/security/security_profile/dump" "github.com/DataDog/datadog-agent/pkg/security/security_profile/profile" ) // v see test/kitchen/test/integration/security-agent-test/rspec/security-agent-test_spec.rb -const dedicatedADNodeForTestsEnv = "DEDICATED_ACTIVITY_DUMP_NODE" +const ( + dedicatedADNodeForTestsEnv = "DEDICATED_ACTIVITY_DUMP_NODE" + testActivityDumpRateLimiter = 200 + testActivityDumpTracedCgroupsCount = 3 +) -const testActivityDumpRateLimiter = 200 -const testActivityDumpTracedCgroupsCount = 3 -const testActivityDumpCgroupDumpTimeout = 11 // probe.MinDumpTimeout(10) + 1 +var ( + testActivityDumpDuration = time.Second * 30 + testActivityDumpLoadControllerPeriod = time.Second * 10 +) func validateActivityDumpOutputs(t *testing.T, test *testModule, expectedFormats []string, outputFiles []string, activityDumpValidator func(ad *dump.ActivityDump) bool, diff --git a/pkg/security/tests/activity_dumps_loadcontroller_test.go b/pkg/security/tests/activity_dumps_loadcontroller_test.go index 37676817a4b90..b4385652821fa 100644 --- a/pkg/security/tests/activity_dumps_loadcontroller_test.go +++ b/pkg/security/tests/activity_dumps_loadcontroller_test.go @@ -8,7 +8,6 @@ package tests import ( - "fmt" "os" "path/filepath" "testing" @@ -37,16 +36,19 @@ func TestActivityDumpsLoadControllerTimeout(t *testing.T) { defer os.RemoveAll(outputDir) expectedFormats := []string{"json", "protobuf"} testActivityDumpTracedEventTypes := []string{"exec", "open", "syscalls", "dns", 
"bind"} - test, err := newTestModule(t, nil, []*rules.RuleDefinition{}, testOpts{ + opts := testOpts{ enableActivityDump: true, activityDumpRateLimiter: testActivityDumpRateLimiter, activityDumpTracedCgroupsCount: testActivityDumpTracedCgroupsCount, - activityDumpCgroupDumpTimeout: testActivityDumpCgroupDumpTimeout, + activityDumpDuration: time.Minute + 10*time.Second, activityDumpLocalStorageDirectory: outputDir, activityDumpLocalStorageCompression: false, activityDumpLocalStorageFormats: expectedFormats, activityDumpTracedEventTypes: testActivityDumpTracedEventTypes, - }) + activityDumpLoadControllerPeriod: testActivityDumpLoadControllerPeriod, + activityDumpLoadControllerTimeout: time.Minute, + } + test, err := newTestModule(t, nil, []*rules.RuleDefinition{}, opts) if err != nil { t.Fatal(err) } @@ -63,17 +65,22 @@ func TestActivityDumpsLoadControllerTimeout(t *testing.T) { t.Fatal(err) } defer dockerInstance.stop() - assert.Equal(t, fmt.Sprintf("%dm0s", testActivityDumpCgroupDumpTimeout), dump.Timeout) + assert.Equal(t, opts.activityDumpDuration.String(), dump.Timeout) - // trigg reducer (before t > timeout / 4) - test.triggerLoadControlerReducer(dockerInstance, dump) + // trigger reducer (before t > timeout / 4) + test.triggerLoadControllerReducer(dockerInstance, dump) // find the new dump, with timeout *= 3/4, or min timeout secondDump, err := test.findNextPartialDump(dockerInstance, dump) if err != nil { t.Fatal(err) } - assert.Equal(t, fmt.Sprintf("%dm0s", activitydump.MinDumpTimeout/time.Minute), secondDump.Timeout) + + timeout, err := time.ParseDuration(secondDump.Timeout) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, opts.activityDumpLoadControllerTimeout, timeout) } func TestActivityDumpsLoadControllerEventTypes(t *testing.T) { @@ -96,11 +103,12 @@ func TestActivityDumpsLoadControllerEventTypes(t *testing.T) { enableActivityDump: true, activityDumpRateLimiter: testActivityDumpRateLimiter, activityDumpTracedCgroupsCount: 
testActivityDumpTracedCgroupsCount, - activityDumpCgroupDumpTimeout: testActivityDumpCgroupDumpTimeout, + activityDumpDuration: testActivityDumpDuration, activityDumpLocalStorageDirectory: outputDir, activityDumpLocalStorageCompression: false, activityDumpLocalStorageFormats: expectedFormats, activityDumpTracedEventTypes: testActivityDumpTracedEventTypes, + activityDumpLoadControllerPeriod: testActivityDumpLoadControllerPeriod, }) if err != nil { t.Fatal(err) @@ -124,29 +132,42 @@ func TestActivityDumpsLoadControllerEventTypes(t *testing.T) { defer dockerInstance.stop() for activeEventTypes := activitydump.TracedEventTypesReductionOrder; ; activeEventTypes = activeEventTypes[1:] { - // add all event types to the dump - test.addAllEventTypesOnDump(dockerInstance, dump, syscallTester) - time.Sleep(time.Second * 3) - // trigg reducer - test.triggerLoadControlerReducer(dockerInstance, dump) - // find the new dump - nextDump, err := test.findNextPartialDump(dockerInstance, dump) - if err != nil { - t.Fatal(err) - } - - // extract all present event types present on the first dump - presentEventTypes, err := test.extractAllDumpEventTypes(dump) - if err != nil { - t.Fatal(err) + testName := "" + for i, activeEventType := range activeEventTypes { + if i > 0 { + testName += "-" + } + testName += activeEventType.String() } - if !isEventTypesStringSlicesEqual(activeEventTypes, presentEventTypes) { - t.Fatalf("Dump's event types are different as expected (%v) vs (%v)", activeEventTypes, presentEventTypes) + if testName == "" { + testName = "none" } + t.Run(testName, func(t *testing.T) { + // add all event types to the dump + test.addAllEventTypesOnDump(dockerInstance, dump, syscallTester) + time.Sleep(time.Second * 3) + // trigger reducer + test.triggerLoadControllerReducer(dockerInstance, dump) + // find the new dump + nextDump, err := test.findNextPartialDump(dockerInstance, dump) + if err != nil { + t.Fatal(err) + } + + // extract all present event types present on the first 
dump + presentEventTypes, err := test.extractAllDumpEventTypes(dump) + if err != nil { + t.Fatal(err) + } + if !isEventTypesStringSlicesEqual(activeEventTypes, presentEventTypes) { + t.Fatalf("Dump's event types are different as expected (%v) vs (%v)", activeEventTypes, presentEventTypes) + } + dump = nextDump + }) + if len(activeEventTypes) == 0 { break } - dump = nextDump } } @@ -170,11 +191,12 @@ func TestActivityDumpsLoadControllerRateLimiter(t *testing.T) { enableActivityDump: true, activityDumpRateLimiter: testActivityDumpRateLimiter, activityDumpTracedCgroupsCount: testActivityDumpTracedCgroupsCount, - activityDumpCgroupDumpTimeout: testActivityDumpCgroupDumpTimeout, + activityDumpDuration: testActivityDumpDuration, activityDumpLocalStorageDirectory: outputDir, activityDumpLocalStorageCompression: false, activityDumpLocalStorageFormats: expectedFormats, activityDumpTracedEventTypes: testActivityDumpTracedEventTypes, + activityDumpLoadControllerPeriod: testActivityDumpLoadControllerPeriod, }) if err != nil { t.Fatal(err) @@ -203,7 +225,7 @@ func TestActivityDumpsLoadControllerRateLimiter(t *testing.T) { test.dockerCreateFiles(dockerInstance, syscallTester, testDir, testActivityDumpRateLimiter*2) time.Sleep(time.Second * 3) // trigg reducer - test.triggerLoadControlerReducer(dockerInstance, dump) + test.triggerLoadControllerReducer(dockerInstance, dump) // find the new dump, with ratelimiter *= 3/4 secondDump, err := test.findNextPartialDump(dockerInstance, dump) if err != nil { @@ -224,7 +246,7 @@ func TestActivityDumpsLoadControllerRateLimiter(t *testing.T) { test.dockerCreateFiles(dockerInstance, syscallTester, testDir, testActivityDumpRateLimiter*2) time.Sleep(time.Second * 3) // trigg reducer - test.triggerLoadControlerReducer(dockerInstance, dump) + test.triggerLoadControllerReducer(dockerInstance, dump) // find the new dump, with ratelimiter *= 3/4 _, err = test.findNextPartialDump(dockerInstance, dump) if err != nil { diff --git 
a/pkg/security/tests/activity_dumps_test.go b/pkg/security/tests/activity_dumps_test.go index 141faf320ba44..54e3af4fd8f15 100644 --- a/pkg/security/tests/activity_dumps_test.go +++ b/pkg/security/tests/activity_dumps_test.go @@ -24,6 +24,8 @@ import ( "github.com/stretchr/testify/assert" ) +var testActivityDumpCleanupPeriod = 15 * time.Second + func TestActivityDumps(t *testing.T) { // skip test that are about to be run on docker (to avoid trying spawning docker in docker) if testEnvironment == DockerEnvironment { @@ -44,11 +46,12 @@ func TestActivityDumps(t *testing.T) { enableActivityDump: true, activityDumpRateLimiter: testActivityDumpRateLimiter, activityDumpTracedCgroupsCount: testActivityDumpTracedCgroupsCount, - activityDumpCgroupDumpTimeout: testActivityDumpCgroupDumpTimeout, + activityDumpDuration: testActivityDumpDuration, activityDumpLocalStorageDirectory: outputDir, activityDumpLocalStorageCompression: false, activityDumpLocalStorageFormats: expectedFormats, activityDumpTracedEventTypes: testActivityDumpTracedEventTypes, + activityDumpCleanupPeriod: testActivityDumpCleanupPeriod, }) if err != nil { t.Fatal(err) @@ -80,7 +83,7 @@ func TestActivityDumps(t *testing.T) { } validateActivityDumpOutputs(t, test, expectedFormats, dump.OutputFiles, func(ad *activitydump.ActivityDump) bool { - nodes := ad.FindMatchingRootNodes("syscall_tester") + nodes := ad.FindMatchingRootNodes(syscallTester) if nodes == nil { t.Fatalf("Node not found in activity dump: %+v", nodes) } @@ -160,7 +163,7 @@ func TestActivityDumps(t *testing.T) { } validateActivityDumpOutputs(t, test, expectedFormats, dump.OutputFiles, func(ad *activitydump.ActivityDump) bool { - nodes := ad.FindMatchingRootNodes("syscall_tester") + nodes := ad.FindMatchingRootNodes(syscallTester) if nodes == nil { t.Fatalf("Node not found in activity dump: %+v", nodes) } @@ -205,8 +208,7 @@ func TestActivityDumps(t *testing.T) { } validateActivityDumpOutputs(t, test, expectedFormats, dump.OutputFiles, func(ad 
*activitydump.ActivityDump) bool { - // searching busybox instead of nslookup because we test on a busybox based alpine - nodes := ad.FindMatchingRootNodes("busybox") + nodes := ad.FindMatchingRootNodes("nslookup") if nodes == nil { t.Fatal("Node not found in activity dump") } @@ -246,8 +248,7 @@ func TestActivityDumps(t *testing.T) { tempPathParts := strings.Split(temp.Name(), "/") validateActivityDumpOutputs(t, test, expectedFormats, dump.OutputFiles, func(ad *activitydump.ActivityDump) bool { - // searching busybox instead of touch because we test on a busybox based alpine - nodes := ad.FindMatchingRootNodes("busybox") + nodes := ad.FindMatchingRootNodes("touch") if nodes == nil { t.Fatal("Node not found in activity dump") } @@ -289,28 +290,28 @@ func TestActivityDumps(t *testing.T) { } validateActivityDumpOutputs(t, test, expectedFormats, dump.OutputFiles, func(ad *activitydump.ActivityDump) bool { - nodes := ad.FindMatchingRootNodes("syscall_tester") + nodes := ad.FindMatchingRootNodes(syscallTester) if nodes == nil { t.Fatal("Node not found in activity dump") } - var exitOK, execveOK bool + var exitOK, bindOK bool for _, node := range nodes { for _, s := range node.Syscalls { if s == int(model.SysExit) || s == int(model.SysExitGroup) { exitOK = true } - if s == int(model.SysExecve) || s == int(model.SysExecveat) { - execveOK = true + if s == int(model.SysBind) { + bindOK = true } } } if !exitOK { t.Errorf("exit syscall not found in activity dump") } - if !execveOK { - t.Errorf("execve syscall not found in activity dump") + if !bindOK { + t.Errorf("bind syscall not found in activity dump") } - return exitOK && execveOK + return exitOK && bindOK }, nil) }) @@ -345,7 +346,7 @@ func TestActivityDumps(t *testing.T) { } validateActivityDumpOutputs(t, test, expectedFormats, dump.OutputFiles, func(ad *activitydump.ActivityDump) bool { - nodes := ad.FindMatchingRootNodes("syscall_tester") + nodes := ad.FindMatchingRootNodes(syscallTester) if nodes == nil { 
t.Fatal("Node not found in activity dump") } @@ -383,11 +384,11 @@ func TestActivityDumps(t *testing.T) { defer dockerInstance.stop() // check that the dump is still alive - time.Sleep((testActivityDumpCgroupDumpTimeout*60 - 20) * time.Second) + time.Sleep(testActivityDumpDuration - 10*time.Second) assert.Equal(t, true, test.isDumpRunning(dump)) - // check that the dump has timeouted after the cleanup period (30s) + 2s - time.Sleep(1 * time.Minute) + // check that the dump has timeouted after the cleanup period + 10s + 2s + time.Sleep(testActivityDumpCleanupPeriod + 12*time.Second) assert.Equal(t, false, test.isDumpRunning(dump)) }) diff --git a/pkg/security/tests/arithmetic_operations_test.go b/pkg/security/tests/arithmetic_operations_test.go new file mode 100644 index 0000000000000..3680265acf9db --- /dev/null +++ b/pkg/security/tests/arithmetic_operations_test.go @@ -0,0 +1,127 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build functionaltests +// +build functionaltests + +package tests + +import ( + "os/exec" + "testing" + + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" +) + +func TestArithmeticOperation(t *testing.T) { + + // Need to add additional conditions so that the event type can be inferred + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_simple_addition", + Expression: `1 + 2 == 5 - 2 && exec.comm in ["ls"]`, + }, + { + ID: "test_simple_addition_false", + Expression: `1 + 2 != 3 && exec.comm in ["ls"]`, + }, + { + ID: "test_more_complex", + Expression: `1 + 2 - 3 + 4 == 4 && exec.comm in ["cp"]`, + }, + { + ID: "test_with_parentheses", + Expression: `1 - 2 + 3 - (1 - 4) - (1 - 5) == 9 && exec.comm in ["pwd"]`, + }, + { + ID: "test_with_time", + Expression: `10s + 40s == 50s && exec.comm in ["cat"]`, + }, + { + ID: "test_with_time_2", + Expression: `process.created_at < 5s && exec.comm in ["grep"]`, + }, + { + ID: "test_with_time_3", + Expression: `event.timestamp - process.created_at + 3s <= 5s && exec.comm in ["echo"]`, + }, + } + + test, err := newTestModule(t, nil, ruleDefs, testOpts{}) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + t.Run("test_simple_addition", func(t *testing.T) { + test.WaitSignal(t, func() error { + cmd := exec.Command("ls") + cmd.Run() + return nil + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "test_simple_addition") + }) + }) + t.Run("test_simple_addition_false", func(t *testing.T) { + test.WaitSignal(t, func() error { + cmd := exec.Command("ls") + cmd.Run() + return nil + }, func(event *model.Event, rule *rules.Rule) { + assertNotTriggeredRule(t, rule, "test_simple_addition_false") + }) + }) + + t.Run("test_more_complex", func(t *testing.T) { + test.WaitSignal(t, func() error { + cmd := exec.Command("cp") + cmd.Run() + return nil + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, 
"test_more_complex") + }) + }) + + t.Run("test_with_parentheses", func(t *testing.T) { + test.WaitSignal(t, func() error { + cmd := exec.Command("pwd") + cmd.Run() + return nil + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "test_with_parentheses") + }) + }) + + t.Run("test_with_time", func(t *testing.T) { + test.WaitSignal(t, func() error { + cmd := exec.Command("cat") + cmd.Run() + return nil + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "test_with_time") + }) + }) + + t.Run("test_with_time_2", func(t *testing.T) { + test.WaitSignal(t, func() error { + cmd := exec.Command("grep") + cmd.Run() + return nil + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "test_with_time_2") + }) + }) + + t.Run("test_with_time_3", func(t *testing.T) { + test.WaitSignal(t, func() error { + cmd := exec.Command("echo") + cmd.Run() + return nil + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "test_with_time_3") + }) + }) +} diff --git a/pkg/security/tests/cmdwrapper.go b/pkg/security/tests/cmdwrapper.go index 02d15e09d1809..2aadc9b54e9ed 100644 --- a/pkg/security/tests/cmdwrapper.go +++ b/pkg/security/tests/cmdwrapper.go @@ -93,7 +93,7 @@ func (d *dockerCmdWrapper) Command(bin string, args []string, envs []string) *ex func (d *dockerCmdWrapper) start() ([]byte, error) { d.containerName = fmt.Sprintf("docker-wrapper-%s", utils.RandString(6)) - cmd := exec.Command(d.executable, "run", "--rm", "-d", "--name", d.containerName, "-v", d.mountSrc+":"+d.mountDest, d.image, "sleep", "600") + cmd := exec.Command(d.executable, "run", "--rm", "-d", "--name", d.containerName, "-v", d.mountSrc+":"+d.mountDest, d.image, "sleep", "1200") out, err := cmd.CombinedOutput() if err == nil { d.containerID = strings.TrimSpace(string(out)) diff --git a/pkg/security/tests/module_tester.go b/pkg/security/tests/module_tester.go index 85106bb510552..ebb01a7965105 100644 --- 
a/pkg/security/tests/module_tester.go +++ b/pkg/security/tests/module_tester.go @@ -122,7 +122,16 @@ runtime_security_config: rate_limiter: {{ .ActivityDumpRateLimiter }} tag_rules: enabled: {{ .ActivityDumpTagRules }} - dump_duration: {{ .ActivityDumpCgroupDumpTimeout }}s + dump_duration: {{ .ActivityDumpDuration }} + {{if .ActivityDumpLoadControllerPeriod }} + load_controller_period: {{ .ActivityDumpLoadControllerPeriod }} + {{end}} + {{if .ActivityDumpCleanupPeriod }} + cleanup_period: {{ .ActivityDumpCleanupPeriod }} + {{end}} + {{if .ActivityDumpLoadControllerTimeout }} + min_timeout: {{ .ActivityDumpLoadControllerTimeout }} + {{end}} traced_cgroups_count: {{ .ActivityDumpTracedCgroupsCount }} traced_event_types: {{range .ActivityDumpTracedEventTypes}} - {{.}} @@ -140,8 +149,8 @@ runtime_security_config: dir: {{ .SecurityProfileDir }} watch_dir: {{ .SecurityProfileWatchDir }} anomaly_detection: - minimum_stable_period: {{.AnomalyDetectionMinimumStablePeriod}}s - workload_warmup_period: {{.AnomalyDetectionWarmupPeriod}}s + minimum_stable_period: {{.AnomalyDetectionMinimumStablePeriod}} + workload_warmup_period: {{.AnomalyDetectionWarmupPeriod}} {{end}} self_test: @@ -219,7 +228,10 @@ type testOpts struct { enableActivityDump bool activityDumpRateLimiter int activityDumpTagRules bool - activityDumpCgroupDumpTimeout int + activityDumpDuration time.Duration + activityDumpLoadControllerPeriod time.Duration + activityDumpCleanupPeriod time.Duration + activityDumpLoadControllerTimeout time.Duration activityDumpTracedCgroupsCount int activityDumpTracedEventTypes []string activityDumpLocalStorageDirectory string @@ -228,8 +240,8 @@ type testOpts struct { enableSecurityProfile bool securityProfileDir string securityProfileWatchDir bool - anomalyDetectionMinimumStablePeriod int - anomalyDetectionWarmupPeriod int + anomalyDetectionMinimumStablePeriod time.Duration + anomalyDetectionWarmupPeriod time.Duration disableDiscarders bool eventsCountThreshold int 
disableERPCDentryResolution bool @@ -256,8 +268,10 @@ func (to testOpts) Equal(opts testOpts) bool { to.enableActivityDump == opts.enableActivityDump && to.activityDumpRateLimiter == opts.activityDumpRateLimiter && to.activityDumpTagRules == opts.activityDumpTagRules && - to.activityDumpCgroupDumpTimeout == opts.activityDumpCgroupDumpTimeout && + to.activityDumpDuration == opts.activityDumpDuration && + to.activityDumpLoadControllerPeriod == opts.activityDumpLoadControllerPeriod && to.activityDumpTracedCgroupsCount == opts.activityDumpTracedCgroupsCount && + to.activityDumpLoadControllerTimeout == opts.activityDumpLoadControllerTimeout && reflect.DeepEqual(to.activityDumpTracedEventTypes, opts.activityDumpTracedEventTypes) && to.activityDumpLocalStorageDirectory == opts.activityDumpLocalStorageDirectory && to.activityDumpLocalStorageCompression == opts.activityDumpLocalStorageCompression && @@ -398,6 +412,12 @@ func assertTriggeredRule(tb testing.TB, r *rules.Rule, id string) bool { return assert.Equal(tb, id, r.ID, "wrong triggered rule") } +//nolint:deadcode,unused +func assertNotTriggeredRule(tb testing.TB, r *rules.Rule, id string) bool { + tb.Helper() + return assert.NotEqual(tb, id, r.ID, "wrong triggered rule") +} + //nolint:deadcode,unused func assertReturnValue(tb testing.TB, retval, expected int64) bool { tb.Helper() @@ -702,8 +722,8 @@ func genTestConfigs(dir string, opts testOpts, testDir string) (*emconfig.Config opts.activityDumpTracedCgroupsCount = 5 } - if opts.activityDumpCgroupDumpTimeout == 0 { - opts.activityDumpTracedCgroupsCount = 30 + if opts.activityDumpDuration == 0 { + opts.activityDumpDuration = testActivityDumpDuration } if len(opts.activityDumpTracedEventTypes) == 0 { @@ -741,7 +761,10 @@ func genTestConfigs(dir string, opts testOpts, testDir string) (*emconfig.Config "EnableActivityDump": opts.enableActivityDump, "ActivityDumpRateLimiter": opts.activityDumpRateLimiter, "ActivityDumpTagRules": opts.activityDumpTagRules, - 
"ActivityDumpCgroupDumpTimeout": opts.activityDumpCgroupDumpTimeout, + "ActivityDumpDuration": opts.activityDumpDuration, + "ActivityDumpLoadControllerPeriod": opts.activityDumpLoadControllerPeriod, + "ActivityDumpLoadControllerTimeout": opts.activityDumpLoadControllerTimeout, + "ActivityDumpCleanupPeriod": opts.activityDumpCleanupPeriod, "ActivityDumpTracedCgroupsCount": opts.activityDumpTracedCgroupsCount, "ActivityDumpTracedEventTypes": opts.activityDumpTracedEventTypes, "ActivityDumpLocalStorageDirectory": opts.activityDumpLocalStorageDirectory, @@ -1689,7 +1712,7 @@ func (tm *testModule) StartActivityDumpComm(comm string, outputDir string, forma } p := &api.ActivityDumpParams{ Comm: comm, - Timeout: 1, + Timeout: "1m", DifferentiateArgs: true, Storage: &api.StorageRequestParams{ LocalStorageDirectory: outputDir, @@ -1909,7 +1932,7 @@ func (tm *testModule) addAllEventTypesOnDump(dockerInstance *dockerCmdWrapper, i } //nolint:deadcode,unused -func (tm *testModule) triggerLoadControlerReducer(dockerInstance *dockerCmdWrapper, id *activityDumpIdentifier) { +func (tm *testModule) triggerLoadControllerReducer(dockerInstance *dockerCmdWrapper, id *activityDumpIdentifier) { monitor := tm.probe.GetMonitor() if monitor == nil { return @@ -2146,3 +2169,35 @@ func WalkActivityTree(at *activity_tree.ActivityTree, walkFunc func(node *Proces } return result } + +func (tm *testModule) GetADSelector(dumpID *activityDumpIdentifier) (*cgroupModel.WorkloadSelector, error) { + ad, err := tm.getADFromDumpId(dumpID) + if err != nil { + return nil, err + } + + selector, err := cgroupModel.NewWorkloadSelector(utils.GetTagValue("image_name", ad.Tags), utils.GetTagValue("image_tag", ad.Tags)) + return &selector, err +} + +func (tm *testModule) SetProfileStatus(selector *cgroupModel.WorkloadSelector, newStatus model.Status) error { + monitor := tm.probe.GetMonitor() + if monitor == nil { + return errors.New("No monitor") + } + + spm := monitor.GetSecurityProfileManager() + if spm == nil 
{ + return errors.New("No security profile manager") + } + + profile := spm.GetProfile(*selector) + if profile == nil || profile.Status == 0 { + return errors.New("No profile found for given selector") + } + + profile.Lock() + profile.Status = newStatus + profile.Unlock() + return nil +} diff --git a/pkg/security/tests/process_test.go b/pkg/security/tests/process_test.go index b7e7fa855c53e..c835a7d9b2032 100644 --- a/pkg/security/tests/process_test.go +++ b/pkg/security/tests/process_test.go @@ -2098,7 +2098,7 @@ func TestProcessResolution(t *testing.T) { t.Errorf("not able to resolve the entry") } - mapsEntry := resolvers.ProcessResolver.ResolveFromKernelMaps(pid, pid) + mapsEntry := resolvers.ProcessResolver.ResolveFromKernelMaps(pid, pid, inode) if mapsEntry == nil { t.Errorf("not able to resolve the entry") } diff --git a/pkg/security/tests/security_profile_test.go b/pkg/security/tests/security_profile_test.go index 080249dfd9c56..d83efcff71410 100644 --- a/pkg/security/tests/security_profile_test.go +++ b/pkg/security/tests/security_profile_test.go @@ -45,7 +45,7 @@ func TestSecurityProfile(t *testing.T) { enableActivityDump: true, activityDumpRateLimiter: 200, activityDumpTracedCgroupsCount: 3, - activityDumpCgroupDumpTimeout: 10, + activityDumpDuration: testActivityDumpDuration, activityDumpLocalStorageDirectory: outputDir, activityDumpLocalStorageCompression: false, activityDumpLocalStorageFormats: expectedFormats, @@ -183,7 +183,7 @@ func TestSecurityProfile(t *testing.T) { validateActivityDumpOutputs(t, test, expectedFormats, dump.OutputFiles, nil, func(sp *profile.SecurityProfile) bool { nodes := WalkActivityTree(sp.ActivityTree, func(node *ProcessNodeAndParent) bool { - if node.Node.Process.FileEvent.BasenameStr == "busybox" { + if node.Node.Process.Argv0 == "nslookup" { return true } return false @@ -227,7 +227,7 @@ func TestAnomalyDetection(t *testing.T) { enableActivityDump: true, activityDumpRateLimiter: 200, activityDumpTracedCgroupsCount: 3, - 
activityDumpCgroupDumpTimeout: 10, + activityDumpDuration: testActivityDumpDuration, activityDumpLocalStorageDirectory: outputDir, activityDumpLocalStorageCompression: false, activityDumpLocalStorageFormats: expectedFormats, @@ -409,7 +409,7 @@ func TestAnomalyDetectionWarmup(t *testing.T) { enableActivityDump: true, activityDumpRateLimiter: 200, activityDumpTracedCgroupsCount: 3, - activityDumpCgroupDumpTimeout: 10, + activityDumpDuration: testActivityDumpDuration, activityDumpLocalStorageDirectory: outputDir, activityDumpLocalStorageCompression: false, activityDumpLocalStorageFormats: expectedFormats, @@ -418,7 +418,7 @@ func TestAnomalyDetectionWarmup(t *testing.T) { securityProfileDir: outputDir, securityProfileWatchDir: true, anomalyDetectionMinimumStablePeriod: 0, - anomalyDetectionWarmupPeriod: 17, + anomalyDetectionWarmupPeriod: 17 * time.Second, }) if err != nil { t.Fatal(err) @@ -511,12 +511,12 @@ func TestSecurityProfileReinsertionPeriod(t *testing.T) { outputDir := t.TempDir() os.MkdirAll(outputDir, 0755) defer os.RemoveAll(outputDir) - reinsertPeriod := 10 + test, err := newTestModule(t, nil, []*rules.RuleDefinition{}, testOpts{ enableActivityDump: true, activityDumpRateLimiter: 200, activityDumpTracedCgroupsCount: 3, - activityDumpCgroupDumpTimeout: 10, + activityDumpDuration: testActivityDumpDuration, activityDumpLocalStorageDirectory: outputDir, activityDumpLocalStorageCompression: false, activityDumpLocalStorageFormats: expectedFormats, @@ -524,7 +524,7 @@ func TestSecurityProfileReinsertionPeriod(t *testing.T) { enableSecurityProfile: true, securityProfileDir: outputDir, securityProfileWatchDir: true, - anomalyDetectionMinimumStablePeriod: reinsertPeriod, + anomalyDetectionMinimumStablePeriod: 10 * time.Second, }) if err != nil { t.Fatal(err) @@ -676,3 +676,141 @@ func TestSecurityProfileReinsertionPeriod(t *testing.T) { }) } + +func TestSecurityProfileAutoSuppression(t *testing.T) { + // skip test that are about to be run on docker (to avoid 
trying spawning docker in docker) + if testEnvironment == DockerEnvironment { + t.Skip("Skip test spawning docker containers on docker") + } + if _, err := whichNonFatal("docker"); err != nil { + t.Skip("Skip test where docker is unavailable") + } + if !IsDedicatedNode(dedicatedADNodeForTestsEnv) { + t.Skip("Skip test when not run in dedicated env") + } + + var expectedFormats = []string{"profile", "protobuf"} + var testActivityDumpTracedEventTypes = []string{"exec", "open", "syscalls", "dns", "bind"} + + outputDir := t.TempDir() + os.MkdirAll(outputDir, 0755) + defer os.RemoveAll(outputDir) + reinsertPeriod := 10 * time.Second + rulesDef := []*rules.RuleDefinition{ + { + ID: "test_autosuppression_exec", + Expression: `exec.file.name == "getconf"`, + }, + { + ID: "test_autosuppression_dns", + Expression: `dns.question.type == A && dns.question.name == "foo.bar"`, + }, + } + test, err := newTestModule(t, nil, rulesDef, testOpts{ + enableActivityDump: true, + activityDumpRateLimiter: 200, + activityDumpTracedCgroupsCount: 3, + activityDumpDuration: testActivityDumpDuration, + activityDumpLocalStorageDirectory: outputDir, + activityDumpLocalStorageCompression: false, + activityDumpLocalStorageFormats: expectedFormats, + activityDumpTracedEventTypes: testActivityDumpTracedEventTypes, + enableSecurityProfile: true, + securityProfileDir: outputDir, + securityProfileWatchDir: true, + anomalyDetectionMinimumStablePeriod: reinsertPeriod, + }) + if err != nil { + t.Fatal(err) + } + defer test.Close() + syscallTester, err := loadSyscallTester(t, test, "syscall_tester") + if err != nil { + t.Fatal(err) + } + + dockerInstance, dump, err := test.StartADockerGetDump() + if err != nil { + t.Fatal(err) + } + defer dockerInstance.stop() + + time.Sleep(time.Second * 1) // to ensure we did not get ratelimited + cmd := dockerInstance.Command(syscallTester, []string{"sleep", "1"}, []string{}) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * 
time.Second) // a quick sleep to let events to be added to the dump + + t.Run("auto-suppression-process-signal", func(t *testing.T) { + // check that we generate an event during profile learning phase + test.WaitSignal(t, func() error { + cmd := dockerInstance.Command("getconf", []string{"-a"}, []string{}) + _, err = cmd.CombinedOutput() + return err + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "test_autosuppression_exec") + assert.Equal(t, "getconf", event.ProcessContext.FileEvent.BasenameStr, "wrong exec file") + }) + }) + + t.Run("auto-suppression-dns-signal", func(t *testing.T) { + // check that we generate an event during profile learning phase + test.WaitSignal(t, func() error { + cmd := dockerInstance.Command("nslookup", []string{"foo.bar"}, []string{}) + _, err = cmd.CombinedOutput() + return err + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "test_autosuppression_dns") + assert.Equal(t, "nslookup", event.ProcessContext.Argv0, "wrong exec file") + }) + }) + + err = test.StopActivityDump(dump.Name, "", "") + if err != nil { + t.Fatal(err) + } + time.Sleep(6 * time.Second) // a quick sleep to let the profile to be loaded (5sec debounce + 1sec spare) + + // get AD selector and force the auto-suppression mode + selector, err := test.GetADSelector(dump) + if err != nil { + t.Fatal(err) + } + if err := test.SetProfileStatus(selector, model.AutoSuppression); err != nil { + t.Fatal(err) + } + + t.Run("auto-suppression-process-suppression", func(t *testing.T) { + // check we autosuppres signals + err = test.GetSignal(t, func() error { + cmd := dockerInstance.Command("getconf", []string{"-a"}, []string{}) + _, err = cmd.CombinedOutput() + return err + }, func(event *model.Event, rule *rules.Rule) { + if event.ProcessContext.ContainerID == dump.ContainerID { + t.Fatal("Got a signal that should have been suppressed") + } + }) + if err != nil && !strings.HasPrefix(err.Error(), "timeout") { + 
t.Fatal("Got an error different from timeout") + } + }) + + t.Run("auto-suppression-dns-suppression", func(t *testing.T) { + // check we autosuppres signals + err = test.GetSignal(t, func() error { + cmd := dockerInstance.Command("nslookup", []string{"foo.bar"}, []string{}) + _, err = cmd.CombinedOutput() + return err + }, func(event *model.Event, rule *rules.Rule) { + if event.ProcessContext.ContainerID == dump.ContainerID { + t.Fatal("Got a signal that should have been suppressed") + } + }) + if err != nil && !strings.HasPrefix(err.Error(), "timeout") { + t.Fatal("Got an error different from timeout") + } + }) +} diff --git a/pkg/security/tests/threat_score_test.go b/pkg/security/tests/threat_score_test.go index 64fccefd434a1..f951654c0ec3b 100644 --- a/pkg/security/tests/threat_score_test.go +++ b/pkg/security/tests/threat_score_test.go @@ -66,7 +66,7 @@ func TestActivityDumpsThreatScore(t *testing.T) { enableActivityDump: true, activityDumpRateLimiter: testActivityDumpRateLimiter, activityDumpTracedCgroupsCount: testActivityDumpTracedCgroupsCount, - activityDumpCgroupDumpTimeout: testActivityDumpCgroupDumpTimeout, + activityDumpDuration: testActivityDumpDuration, activityDumpLocalStorageDirectory: outputDir, activityDumpLocalStorageCompression: false, activityDumpLocalStorageFormats: expectedFormats, @@ -105,7 +105,7 @@ func TestActivityDumpsThreatScore(t *testing.T) { tempPathParts := strings.Split(filePath, "/") validateActivityDumpOutputs(t, test, expectedFormats, dump.OutputFiles, func(ad *activitydump.ActivityDump) bool { - nodes := ad.ActivityTree.FindMatchingRootNodes("busybox") + nodes := ad.ActivityTree.FindMatchingRootNodes("touch") if nodes == nil || len(nodes) != 1 { t.Fatal("Uniq node not found in activity dump") } @@ -154,7 +154,7 @@ func TestActivityDumpsThreatScore(t *testing.T) { } validateActivityDumpOutputs(t, test, expectedFormats, dump.OutputFiles, func(ad *activitydump.ActivityDump) bool { - nodes := 
ad.ActivityTree.FindMatchingRootNodes("busybox") + nodes := ad.ActivityTree.FindMatchingRootNodes("nslookup") if nodes == nil || len(nodes) != 1 { t.Fatal("Uniq node not found in activity dump") } @@ -195,7 +195,7 @@ func TestActivityDumpsThreatScore(t *testing.T) { } validateActivityDumpOutputs(t, test, expectedFormats, dump.OutputFiles, func(ad *activitydump.ActivityDump) bool { - nodes := ad.ActivityTree.FindMatchingRootNodes("syscall_tester") + nodes := ad.ActivityTree.FindMatchingRootNodes(syscallTester) if nodes == nil || len(nodes) != 1 { t.Fatal("Uniq node not found in activity dump") } @@ -241,7 +241,7 @@ func TestActivityDumpsThreatScore(t *testing.T) { } validateActivityDumpOutputs(t, test, expectedFormats, dump.OutputFiles, func(ad *activitydump.ActivityDump) bool { - nodes := ad.ActivityTree.FindMatchingRootNodes("syscall_tester") + nodes := ad.ActivityTree.FindMatchingRootNodes(syscallTester) if nodes == nil { t.Fatal("Node not found in activity dump") } diff --git a/pkg/security/utils/limiter.go b/pkg/security/utils/limiter.go new file mode 100644 index 0000000000000..6195a2bc13397 --- /dev/null +++ b/pkg/security/utils/limiter.go @@ -0,0 +1,52 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux + +package utils + +import ( + "time" + + "github.com/hashicorp/golang-lru/v2/simplelru" +) + +// Limiter defines generic rate limiter +type Limiter[K comparable] struct { + cache *simplelru.LRU[K, time.Time] + period time.Duration +} + +// NewLimiter returns a rate limiter +func NewLimiter[K comparable](size int, period time.Duration) (*Limiter[K], error) { + cache, err := simplelru.NewLRU[K, time.Time](size, nil) + if err != nil { + return nil, err + } + + return &Limiter[K]{ + cache: cache, + period: period, + }, nil +} + +// IsAllowed returns whether an entry is allowed or not +func (l *Limiter[K]) IsAllowed(k K) bool { + now := time.Now() + if ts, ok := l.cache.Get(k); ok { + if now.After(ts) { + l.cache.Remove(k) + } else { + return false + } + } + + return true +} + +// Count marks the key as used +func (l *Limiter[K]) Count(k K) { + l.cache.Add(k, time.Now().Add(l.period)) +} diff --git a/pkg/security/utils/proc.go b/pkg/security/utils/proc.go index 1de270daacd9d..fb604e31443d5 100644 --- a/pkg/security/utils/proc.go +++ b/pkg/security/utils/proc.go @@ -212,40 +212,40 @@ type FilledProcess struct { } // GetFilledProcess returns a FilledProcess from a Process input -func GetFilledProcess(p *process.Process) *FilledProcess { +func GetFilledProcess(p *process.Process) (*FilledProcess, error) { ppid, err := p.Ppid() if err != nil { - return nil + return nil, err } createTime, err := p.CreateTime() if err != nil { - return nil + return nil, err } uids, err := p.Uids() if err != nil { - return nil + return nil, err } gids, err := p.Gids() if err != nil { - return nil + return nil, err } name, err := p.Name() if err != nil { - return nil + return nil, err } memInfo, err := p.MemoryInfo() if err != nil { - return nil + return nil, err } cmdLine, err := p.CmdlineSlice() if err != nil { - return nil + return nil, err } return &FilledProcess{ @@ -257,7 +257,7 @@ func GetFilledProcess(p *process.Process) *FilledProcess { Gids: gids, MemInfo: 
memInfo, Cmdline: cmdLine, - } + }, nil } const MAX_ENV_VARS_COLLECTED = 256 @@ -323,6 +323,10 @@ func EnvVars(priorityEnvsPrefixes []string, pid int32) ([]string, bool, error) { } } + if envCounter > MAX_ENV_VARS_COLLECTED { + envCounter = MAX_ENV_VARS_COLLECTED + } + // second pass collecting scanner, err = newEnvScanner(f) if err != nil { diff --git a/pkg/serverless/otlp/otlp_test.go b/pkg/serverless/otlp/otlp_test.go index 9013007155abd..7f6e5c7d7586f 100644 --- a/pkg/serverless/otlp/otlp_test.go +++ b/pkg/serverless/otlp/otlp_test.go @@ -3,7 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2021-present Datadog, Inc. -//go:build otlp +//go:build serverless && otlp +// +build serverless,otlp package otlp @@ -110,7 +111,8 @@ func testServerlessOTLPAgentReceivesTraces(client otlptrace.Client, traceChan <- select { case <-traceChan: - case <-time.After(10 * time.Second): + // 1 sec is the amount of time we wait when shutting down the daemon + case <-time.After(1 * time.Second): return fmt.Errorf("timeout waiting for span to arrive") } return nil diff --git a/pkg/tagger/collectors/workloadmeta_extract.go b/pkg/tagger/collectors/workloadmeta_extract.go index 11092f4b059b4..809d5320f570e 100644 --- a/pkg/tagger/collectors/workloadmeta_extract.go +++ b/pkg/tagger/collectors/workloadmeta_extract.go @@ -133,6 +133,8 @@ func (c *WorkloadMetaCollector) processEvents(evBundle workloadmeta.EventBundle) tagInfos = append(tagInfos, c.handleContainer(ev)...) case workloadmeta.KindKubernetesPod: tagInfos = append(tagInfos, c.handleKubePod(ev)...) + case workloadmeta.KindKubernetesNode: + tagInfos = append(tagInfos, c.handleKubeNode(ev)...) case workloadmeta.KindECSTask: tagInfos = append(tagInfos, c.handleECSTask(ev)...) 
case workloadmeta.KindContainerImageMetadata: @@ -392,6 +394,28 @@ func (c *WorkloadMetaCollector) handleKubePod(ev workloadmeta.Event) []*TagInfo return tagInfos } +func (c *WorkloadMetaCollector) handleKubeNode(ev workloadmeta.Event) []*TagInfo { + node := ev.Entity.(*workloadmeta.KubernetesNode) + + tags := utils.NewTagList() + + // Add tags for node here + + low, orch, high, standard := tags.Compute() + tagInfos := []*TagInfo{ + { + Source: nodeSource, + Entity: buildTaggerEntityID(node.EntityID), + HighCardTags: high, + OrchestratorCardTags: orch, + LowCardTags: low, + StandardTags: standard, + }, + } + + return tagInfos +} + func (c *WorkloadMetaCollector) handleECSTask(ev workloadmeta.Event) []*TagInfo { task := ev.Entity.(*workloadmeta.ECSTask) @@ -669,6 +693,8 @@ func buildTaggerEntityID(entityID workloadmeta.EntityID) string { return containers.BuildTaggerEntityName(entityID.ID) case workloadmeta.KindKubernetesPod: return kubelet.PodUIDToTaggerEntityName(entityID.ID) + case workloadmeta.KindKubernetesNode: + return kubelet.NodeUIDToTaggerEntityName(entityID.ID) case workloadmeta.KindECSTask: return fmt.Sprintf("ecs_task://%s", entityID.ID) case workloadmeta.KindContainerImageMetadata: diff --git a/pkg/tagger/collectors/workloadmeta_main.go b/pkg/tagger/collectors/workloadmeta_main.go index 08cf41a244324..b121782db169a 100644 --- a/pkg/tagger/collectors/workloadmeta_main.go +++ b/pkg/tagger/collectors/workloadmeta_main.go @@ -24,6 +24,7 @@ const ( staticSource = workloadmetaCollectorName + "-static" podSource = workloadmetaCollectorName + "-" + string(workloadmeta.KindKubernetesPod) + nodeSource = workloadmetaCollectorName + "-" + string(workloadmeta.KindKubernetesNode) taskSource = workloadmetaCollectorName + "-" + string(workloadmeta.KindECSTask) containerSource = workloadmetaCollectorName + "-" + string(workloadmeta.KindContainer) containerImageSource = workloadmetaCollectorName + "-" + string(workloadmeta.KindContainerImageMetadata) diff --git 
a/pkg/tagger/replay/tagger.go b/pkg/tagger/replay/tagger.go index 8cdc190603bd8..f58ecba96652a 100644 --- a/pkg/tagger/replay/tagger.go +++ b/pkg/tagger/replay/tagger.go @@ -10,7 +10,6 @@ import ( "time" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" - pbutils "github.com/DataDog/datadog-agent/pkg/proto/utils" "github.com/DataDog/datadog-agent/pkg/status/health" tagger_api "github.com/DataDog/datadog-agent/pkg/tagger/api" "github.com/DataDog/datadog-agent/pkg/tagger/collectors" @@ -19,6 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/tagger/types" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/util/log" + pbutils "github.com/DataDog/datadog-agent/pkg/util/proto" ) // Tagger stores tags to entity as stored in a replay state. diff --git a/pkg/tagger/server/server.go b/pkg/tagger/server/server.go index dadbd5318837a..79b8847cc41b0 100644 --- a/pkg/tagger/server/server.go +++ b/pkg/tagger/server/server.go @@ -14,11 +14,11 @@ import ( "google.golang.org/grpc/status" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" - pbutils "github.com/DataDog/datadog-agent/pkg/proto/utils" "github.com/DataDog/datadog-agent/pkg/tagger" "github.com/DataDog/datadog-agent/pkg/tagger/telemetry" "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/DataDog/datadog-agent/pkg/util/log" + pbutils "github.com/DataDog/datadog-agent/pkg/util/proto" ) const ( diff --git a/pkg/trace/agent/agent_test.go b/pkg/trace/agent/agent_test.go index c7b6597e9ebfb..18a0126b3aca2 100644 --- a/pkg/trace/agent/agent_test.go +++ b/pkg/trace/agent/agent_test.go @@ -37,7 +37,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/trace/traceutil" "github.com/DataDog/datadog-agent/pkg/trace/writer" - "github.com/gogo/protobuf/proto" + "google.golang.org/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -696,7 +696,7 @@ func TestClientComputedTopLevel(t *testing.T) { } func TestFilteredByTags(t *testing.T) { - for _, 
tt := range []struct { + for _, tt := range []*struct { require []*config.Tag reject []*config.Tag span pb.Span @@ -1023,8 +1023,8 @@ func TestSample(t *testing.T) { } for name, tt := range tests { t.Run(name, func(t *testing.T) { - before := new(pb.TraceChunk) // make sure tt.trace.TraceChunk never changes - *before = *tt.trace.TraceChunk + // before := traceutil.CopyTraceChunk(tt.trace.TraceChunk) + before := tt.trace.TraceChunk.ShallowCopy() _, keep, sampled := a.sample(time.Now(), info.NewReceiverStats().GetTagStats(info.Tags{}), &tt.trace) assert.Equal(t, tt.keep, keep) assert.Equal(t, tt.dropped, sampled.TraceChunk.DroppedTrace) @@ -1661,8 +1661,8 @@ func TestSampleWithPriorityNone(t *testing.T) { TraceChunk: testutil.TraceChunkWithSpan(span), Root: span, } - before := new(pb.TraceChunk) - *before = *pt.TraceChunk + // before := traceutil.CopyTraceChunk(pt.TraceChunk) + before := pt.TraceChunk.ShallowCopy() numEvents, keep, sampled := agnt.sample(time.Now(), info.NewReceiverStats().GetTagStats(info.Tags{}), &pt) assert.True(t, keep) // Score Sampler should keep the trace. 
assert.False(t, sampled.TraceChunk.DroppedTrace) diff --git a/pkg/trace/agent/fuzz_test.go b/pkg/trace/agent/fuzz_test.go index 09dc7d8616169..59c110803ec1f 100644 --- a/pkg/trace/agent/fuzz_test.go +++ b/pkg/trace/agent/fuzz_test.go @@ -58,10 +58,10 @@ func FuzzObfuscateSpan(f *testing.F) { encode := func(pbSpan *pb.Span) ([]byte, error) { return pbSpan.MarshalMsg(nil) } - decode := func(span []byte) (pb.Span, error) { + decode := func(span []byte) (*pb.Span, error) { var pbSpan pb.Span _, err := pbSpan.UnmarshalMsg(span) - return pbSpan, err + return &pbSpan, err } seedCorpus := []*pb.Span{ { @@ -92,8 +92,8 @@ func FuzzObfuscateSpan(f *testing.F) { if err != nil { t.Skipf("Skipping invalid span: %v", err) } - agent.obfuscateSpan(&pbSpan) - encPostObfuscate, err := encode(&pbSpan) + agent.obfuscateSpan(pbSpan) + encPostObfuscate, err := encode(pbSpan) if err != nil { t.Fatalf("obfuscateSpan returned an invalid span: %v", err) } diff --git a/pkg/trace/agent/obfuscate_test.go b/pkg/trace/agent/obfuscate_test.go index 822e474373bae..1169fd8ace8ea 100644 --- a/pkg/trace/agent/obfuscate_test.go +++ b/pkg/trace/agent/obfuscate_test.go @@ -268,7 +268,7 @@ func TestSQLResourceWithoutQuery(t *testing.T) { func TestSQLResourceWithError(t *testing.T) { assert := assert.New(t) - testCases := []struct { + testCases := []*struct { span pb.Span }{ { diff --git a/pkg/trace/api/api.go b/pkg/trace/api/api.go index ef4661193d660..370f45db4c21c 100644 --- a/pkg/trace/api/api.go +++ b/pkg/trace/api/api.go @@ -344,7 +344,7 @@ func (r *HTTPReceiver) tagStats(v Version, httpHeader http.Header) *info.TagStat func decodeTracerPayload(v Version, req *http.Request, ts *info.TagStats, cIDProvider IDProvider) (tp *pb.TracerPayload, ranHook bool, err error) { switch v { case v01: - var spans []pb.Span + var spans []*pb.Span if err = json.NewDecoder(req.Body).Decode(&spans); err != nil { return nil, false, err } @@ -740,11 +740,11 @@ func decodeRequest(req *http.Request, dest *pb.Traces) 
(ranHook bool, err error) } } -func traceChunksFromSpans(spans []pb.Span) []*pb.TraceChunk { +func traceChunksFromSpans(spans []*pb.Span) []*pb.TraceChunk { traceChunks := []*pb.TraceChunk{} byID := make(map[uint64][]*pb.Span) for _, s := range spans { - byID[s.TraceID] = append(byID[s.TraceID], &s) + byID[s.TraceID] = append(byID[s.TraceID], s) } for _, t := range byID { traceChunks = append(traceChunks, &pb.TraceChunk{ diff --git a/pkg/trace/api/otlp_test.go b/pkg/trace/api/otlp_test.go index 78f9148cf9340..4e7031c82adb4 100644 --- a/pkg/trace/api/otlp_test.go +++ b/pkg/trace/api/otlp_test.go @@ -702,7 +702,7 @@ func TestOTLPHelpers(t *testing.T) { }) t.Run("status2Error", func(t *testing.T) { - for _, tt := range []struct { + for _, tt := range []*struct { status ptrace.StatusCode msg string events ptrace.SpanEventSlice diff --git a/pkg/trace/api/telemetry.go b/pkg/trace/api/telemetry.go index 7a7fdf9c6986e..59e0a0ac55005 100644 --- a/pkg/trace/api/telemetry.go +++ b/pkg/trace/api/telemetry.go @@ -13,6 +13,7 @@ import ( "net/http" "net/http/httputil" "net/url" + "strings" "time" "github.com/DataDog/datadog-agent/pkg/trace/api/internal/header" @@ -86,15 +87,25 @@ func (r *HTTPReceiver) telemetryProxyHandler() http.Handler { req.Header.Set("User-Agent", "") } - if cid := r.containerIDProvider.GetContainerID(req.Context(), req.Header); cid != "" { - req.Header.Set(header.ContainerID, cid) - } else { + containerID := r.containerIDProvider.GetContainerID(req.Context(), req.Header) + if containerID == "" { metrics.Count("datadog.trace_agent.telemetry_proxy.no_container_id_found", 1, []string{}, 1) } + containerTags := getContainerTags(r.conf.ContainerTags, containerID) + req.Header.Set("DD-Agent-Hostname", r.conf.Hostname) req.Header.Set("DD-Agent-Env", r.conf.DefaultEnv) + if containerID != "" { + req.Header.Set(header.ContainerID, containerID) + } + if containerTags != "" { + req.Header.Set("x-datadog-container-tags", containerTags) + } + if taskArn, ok := 
extractFargateTask(containerTags); ok { + req.Header.Set("dd-task-arn", taskArn) + } if arn, ok := r.conf.GlobalTags[functionARNKey]; ok { - req.Header.Set("DD-Function-ARN", arn) + req.Header.Set("dd-function-arn", arn) } } return &httputil.ReverseProxy{ @@ -104,6 +115,26 @@ func (r *HTTPReceiver) telemetryProxyHandler() http.Handler { } } +func extractFargateTask(containerTags string) (string, bool) { + return extractTag(containerTags, "task_arn") +} + +func extractTag(tags string, name string) (string, bool) { + leftoverTags := tags + for { + if leftoverTags == "" { + return "", false + } + var tag string + tag, leftoverTags, _ = strings.Cut(leftoverTags, ",") + + tagName, value, hasValue := strings.Cut(tag, ":") + if hasValue && tagName == name { + return value, true + } + } +} + // RoundTrip sends request first to Endpoint[0], then sends a copy of main request to every configurged // additional endpoint. // diff --git a/pkg/trace/api/telemetry_test.go b/pkg/trace/api/telemetry_test.go index f51e6888064aa..5230507190770 100644 --- a/pkg/trace/api/telemetry_test.go +++ b/pkg/trace/api/telemetry_test.go @@ -197,3 +197,41 @@ func TestTelemetryConfig(t *testing.T) { assert.Equal(t, "OK", recordedResponse(t, rec)) }) } + +func TestExtractFargateTask(t *testing.T) { + t.Run("contains-tag", func(t *testing.T) { + tags := "foo:bar,baz:,task_arn:123" + + taskArn, ok := extractFargateTask(tags) + + assert.True(t, ok) + assert.Equal(t, "123", taskArn) + }) + + t.Run("doesnt-contain-tag", func(t *testing.T) { + tags := "foo:bar,," + + taskArn, ok := extractFargateTask(tags) + + assert.False(t, ok) + assert.Equal(t, "", taskArn) + }) + + t.Run("contain-empty-tag", func(t *testing.T) { + tags := "foo:bar,task_arn:,baz:abc" + + taskArn, ok := extractFargateTask(tags) + + assert.True(t, ok) + assert.Equal(t, "", taskArn) + }) + + t.Run("empty-string", func(t *testing.T) { + tags := "" + + taskArn, ok := extractFargateTask(tags) + + assert.False(t, ok) + assert.Equal(t, "", 
taskArn) + }) +} diff --git a/pkg/trace/go.mod b/pkg/trace/go.mod index 5c71e53680718..660355b89bd06 100644 --- a/pkg/trace/go.mod +++ b/pkg/trace/go.mod @@ -10,65 +10,71 @@ go 1.18 replace github.com/docker/distribution => github.com/docker/distribution v2.8.1+incompatible require ( - github.com/DataDog/datadog-agent/pkg/obfuscate v0.45.0-rc.3 - github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.45.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/cgroups v0.45.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.45.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/pointer v0.45.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.45.0-rc.3 + github.com/DataDog/datadog-agent/pkg/obfuscate v0.46.0-rc.2 + github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.46.0-rc.2 + github.com/DataDog/datadog-agent/pkg/util/cgroups v0.46.0-rc.2 + github.com/DataDog/datadog-agent/pkg/util/log v0.46.0-rc.2 + github.com/DataDog/datadog-agent/pkg/util/pointer v0.46.0-rc.2 + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.46.0-rc.2 github.com/DataDog/datadog-go/v5 v5.1.1 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.2.0 - github.com/DataDog/sketches-go v1.4.1 - github.com/Microsoft/go-winio v0.5.2 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.2.3 + github.com/DataDog/sketches-go v1.4.2 + github.com/Microsoft/go-winio v0.6.0 github.com/davecgh/go-spew v1.1.1 github.com/gogo/protobuf v1.3.2 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.3 github.com/google/gofuzz v1.2.0 github.com/google/uuid v1.3.0 - github.com/shirou/gopsutil/v3 v3.22.9 - github.com/stretchr/testify v1.8.2 - github.com/tinylib/msgp v1.1.6 + github.com/shirou/gopsutil/v3 v3.23.2 + github.com/stretchr/testify v1.8.3 + github.com/tinylib/msgp v1.1.8 github.com/vmihailenco/msgpack/v4 v4.3.12 - go.opentelemetry.io/collector/pdata v1.0.0-rcv0011 - go.opentelemetry.io/collector/semconv v0.75.0 + 
go.opentelemetry.io/collector/pdata v1.0.0-rcv0012 + go.opentelemetry.io/collector/semconv v0.78.1 go.uber.org/atomic v1.11.0 - golang.org/x/sys v0.7.0 - golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba - google.golang.org/grpc v1.54.0 - k8s.io/apimachinery v0.23.8 + golang.org/x/sys v0.8.0 + golang.org/x/time v0.3.0 + google.golang.org/grpc v1.55.0 + google.golang.org/protobuf v1.30.0 + k8s.io/apimachinery v0.25.5 ) require ( github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/containerd/cgroups v1.0.4 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/dustin/go-humanize v1.0.0 // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect + github.com/godbus/dbus/v5 v5.0.6 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/karrick/godirwalk v1.17.0 // indirect - github.com/kr/pretty v0.3.0 // indirect - github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281 // indirect + github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect github.com/outcaste-io/ristretto v0.2.1 // indirect - github.com/philhofer/fwd v1.1.1 // indirect + github.com/philhofer/fwd v1.1.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect - github.com/tklauser/go-sysconf v0.3.10 // indirect - 
github.com/tklauser/numcpus v0.5.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.5.0 // indirect + github.com/tklauser/go-sysconf v0.3.11 // indirect + github.com/tklauser/numcpus v0.6.0 // indirect github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect - golang.org/x/net v0.9.0 // indirect + golang.org/x/crypto v0.7.0 // indirect + golang.org/x/mod v0.10.0 // indirect + golang.org/x/net v0.10.0 // indirect golang.org/x/text v0.9.0 // indirect + golang.org/x/tools v0.9.1 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -76,6 +82,7 @@ require ( replace ( github.com/DataDog/datadog-agent => ../../ github.com/DataDog/datadog-agent/pkg/obfuscate => ../obfuscate + github.com/DataDog/datadog-agent/pkg/proto => ../proto github.com/DataDog/datadog-agent/pkg/remoteconfig/state => ../remoteconfig/state github.com/DataDog/datadog-agent/pkg/util/cgroups => ../util/cgroups github.com/DataDog/datadog-agent/pkg/util/log => ../util/log diff --git a/pkg/trace/go.sum b/pkg/trace/go.sum index daf35cbbd9008..07cb0716e5df0 100644 --- a/pkg/trace/go.sum +++ b/pkg/trace/go.sum @@ -1,22 +1,15 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go/v5 v5.1.1 h1:JLZ6s2K1pG2h9GkvEvMdEGqMDyVLEAccdX5TltWcLMU= github.com/DataDog/datadog-go/v5 v5.1.1/go.mod h1:KhiYb2Badlv9/rofz+OznKoEF5XKTonWyhx5K83AP8E= github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork 
h1:yBq5PrAtrM4yVeSzQ+bn050+Ysp++RKF1QmtkL4VqvU= github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork/go.mod h1:yA5JwkZsHTLuqq3zaRgUQf35DfDkpOZqgtBqHKpwrBs= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.2.0 h1:WiLLKBFvMf8R53s610OlgsgsOk5m9t7imfDSj9qUDQs= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.2.0/go.mod h1:FN5Kqegof+xUhQy4W3rIQ7qHpBehbB6EnEs1BXWzJDI= -github.com/DataDog/sketches-go v1.4.1 h1:j5G6as+9FASM2qC36lvpvQAj9qsv/jUs3FtO8CwZNAY= -github.com/DataDog/sketches-go v1.4.1/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.2.3 h1:QM2aly2st5pdRuDhc8w026YXNIm9IjjyaXhJ4BTdTZQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.2.3/go.mod h1:q5aHWhZtr114VrPgmlJL+JnemK7O66YxDCRy9zAHAxU= +github.com/DataDog/sketches-go v1.4.2 h1:gppNudE9d19cQ98RYABOetxIhpTCl4m7CnbRZjvVA/o= +github.com/DataDog/sketches-go v1.4.2/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/Microsoft/go-winio v0.6.0 
h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -25,84 +18,64 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= +github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= 
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod 
h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf 
v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI= @@ -110,47 +83,36 @@ github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1q github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281 h1:aczX6NMOtt6L4YT0fQvKkDK6LZEtdOso9sUH89V1+P0= -github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281/go.mod h1:lc+czkgO/8F7puNki5jk8QyujbfK1LOT7Wl0ON2hxyk= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= +github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= +github.com/opencontainers/runtime-spec 
v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/outcaste-io/ristretto v0.2.1 h1:KCItuNIGJZcursqHr3ghO7fc5ddZLEHspL9UR0cQM64= github.com/outcaste-io/ristretto v0.2.1/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= -github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= -github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= +github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -158,18 +120,13 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U= -github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= -github.com/secure-systems-lab/go-securesystemslib 
v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= -github.com/shirou/gopsutil/v3 v3.22.9 h1:yibtJhIVEMcdw+tCTbOPiF1VcsuDeTE4utJ8Dm4c5eA= -github.com/shirou/gopsutil/v3 v3.22.9/go.mod h1:bBYl1kjgEJpWpxeHmLI+dVHWtyAwfcmSBLDsp2TNT8A= +github.com/secure-systems-lab/go-securesystemslib v0.5.0 h1:oTiNu0QnulMQgN/hLK124wJD/r2f9ZhIUuKIeBsCBT8= +github.com/secure-systems-lab/go-securesystemslib v0.5.0/go.mod h1:uoCqUC0Ap7jrBSEanxT+SdACYJTVplRXWLkGMuDjXqk= +github.com/shirou/gopsutil/v3 v3.23.2 h1:PAWSuiAszn7IhPMBtXsbSCafej7PqUOvY6YywlQUExU= +github.com/shirou/gopsutil/v3 v3.23.2/go.mod h1:gv0aQw33GLo3pG8SiWKiQrbDzbRY1K80RyZJ7V4Th1M= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= @@ -182,16 +139,16 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= -github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= -github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= -github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= -github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= -github.com/tklauser/numcpus v0.5.0 h1:ooe7gN0fg6myJ0EKoTAf5hebTZrH52px3New/D9iJ+A= -github.com/tklauser/numcpus v0.5.0/go.mod h1:OGzpTxpcIMNGYQdit2BYL1pvk/dSOaJWjKoflh+RQjo= +github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= +github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= +github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= +github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= +github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= +github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= @@ -200,12 +157,13 @@ github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgq github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0011 h1:7lT0vseP89mHtUpvgmWYRvQZ0eY+SHbVsnXY20xkoMg= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0011/go.mod h1:9vrXSQBeMRrdfGt9oMgYweqERJ8adaiQjN6LSbqRMMA= -go.opentelemetry.io/collector/semconv v0.75.0 h1:zIlZk+zh1bgc3VKE1PZEmhOaVa4tQHZMcFFUXmGekVs= -go.opentelemetry.io/collector/semconv v0.75.0/go.mod h1:xt8oDOiwa1jy24tGUo8+SzpphI7ZredS2WM/0m8rtTA= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0012 h1:R+cfEUMyLn9Q1QknyQ4QU77pbfc1aJKYEXFHtnwSbCg= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0012/go.mod h1:rEAKFqc1L03lidKtra/2/dJtI0Hp+JsQxuPEIkj/2Vg= +go.opentelemetry.io/collector/semconv v0.78.1 h1:YlhokDVTP+gw6yKA0Jc2FcfddhD+a6E5Ixmby5xBWs0= +go.opentelemetry.io/collector/semconv v0.78.1/go.mod h1:lazBA42nqZPNPWDMiqWfr5eIVeNgRmoLDbQmjXKcm70= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -215,45 +173,43 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= -golang.org/x/crypto 
v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net 
v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -275,108 +231,73 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools 
v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= 
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= +google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf 
v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/apimachinery v0.23.8 h1:6Z+0LLvvPnAF6GXbUcBmzB1+b/AnDZpVd2N0MxUJcl0= -k8s.io/apimachinery v0.23.8/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +k8s.io/apimachinery v0.25.5 h1:SQomYHvv+aO43qdu3QKRf9YuI0oI8w3RrOQ1qPbAUGY= +k8s.io/apimachinery v0.25.5/go.mod h1:1S2i1QHkmxc8+EZCIxe/fX5hpldVXk4gvnJInMEb8D4= diff --git a/pkg/trace/pb/agent_payload.pb.go b/pkg/trace/pb/agent_payload.pb.go index 69ca272a23e94..9aa335dd00c25 100644 --- a/pkg/trace/pb/agent_payload.pb.go +++ b/pkg/trace/pb/agent_payload.pb.go @@ -1,29 
+1,33 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// protoc -I. -I$GOPATH/src --gogofaster_out=. span.proto tracer_payload.proto agent_payload.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 // source: agent_payload.proto package pb import ( - encoding_binary "encoding/binary" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // AgentPayload represents payload the agent sends to the intake. type AgentPayload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // hostName specifies hostname of where the agent is running. HostName string `protobuf:"bytes,1,opt,name=hostName,proto3" json:"hostName,omitempty"` // env specifies `env` set in agent configuration. 
@@ -42,738 +46,193 @@ type AgentPayload struct { RareSamplerEnabled bool `protobuf:"varint,10,opt,name=rareSamplerEnabled,proto3" json:"rareSamplerEnabled,omitempty"` } -func (m *AgentPayload) Reset() { *m = AgentPayload{} } -func (m *AgentPayload) String() string { return proto.CompactTextString(m) } -func (*AgentPayload) ProtoMessage() {} -func (*AgentPayload) Descriptor() ([]byte, []int) { - return fileDescriptor_bddccf659a49dc88, []int{0} +func (x *AgentPayload) Reset() { + *x = AgentPayload{} + if protoimpl.UnsafeEnabled { + mi := &file_agent_payload_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *AgentPayload) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + +func (x *AgentPayload) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AgentPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AgentPayload.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err + +func (*AgentPayload) ProtoMessage() {} + +func (x *AgentPayload) ProtoReflect() protoreflect.Message { + mi := &file_agent_payload_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *AgentPayload) XXX_Merge(src proto.Message) { - xxx_messageInfo_AgentPayload.Merge(m, src) -} -func (m *AgentPayload) XXX_Size() int { - return m.Size() -} -func (m *AgentPayload) XXX_DiscardUnknown() { - xxx_messageInfo_AgentPayload.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_AgentPayload proto.InternalMessageInfo +// Deprecated: Use AgentPayload.ProtoReflect.Descriptor instead. 
+func (*AgentPayload) Descriptor() ([]byte, []int) { + return file_agent_payload_proto_rawDescGZIP(), []int{0} +} -func (m *AgentPayload) GetHostName() string { - if m != nil { - return m.HostName +func (x *AgentPayload) GetHostName() string { + if x != nil { + return x.HostName } return "" } -func (m *AgentPayload) GetEnv() string { - if m != nil { - return m.Env +func (x *AgentPayload) GetEnv() string { + if x != nil { + return x.Env } return "" } -func (m *AgentPayload) GetTracerPayloads() []*TracerPayload { - if m != nil { - return m.TracerPayloads +func (x *AgentPayload) GetTracerPayloads() []*TracerPayload { + if x != nil { + return x.TracerPayloads } return nil } -func (m *AgentPayload) GetTags() map[string]string { - if m != nil { - return m.Tags +func (x *AgentPayload) GetTags() map[string]string { + if x != nil { + return x.Tags } return nil } -func (m *AgentPayload) GetAgentVersion() string { - if m != nil { - return m.AgentVersion +func (x *AgentPayload) GetAgentVersion() string { + if x != nil { + return x.AgentVersion } return "" } -func (m *AgentPayload) GetTargetTPS() float64 { - if m != nil { - return m.TargetTPS +func (x *AgentPayload) GetTargetTPS() float64 { + if x != nil { + return x.TargetTPS } return 0 } -func (m *AgentPayload) GetErrorTPS() float64 { - if m != nil { - return m.ErrorTPS +func (x *AgentPayload) GetErrorTPS() float64 { + if x != nil { + return x.ErrorTPS } return 0 } -func (m *AgentPayload) GetRareSamplerEnabled() bool { - if m != nil { - return m.RareSamplerEnabled +func (x *AgentPayload) GetRareSamplerEnabled() bool { + if x != nil { + return x.RareSamplerEnabled } return false } -func init() { - proto.RegisterType((*AgentPayload)(nil), "pb.AgentPayload") - proto.RegisterMapType((map[string]string)(nil), "pb.AgentPayload.TagsEntry") -} - -func init() { proto.RegisterFile("agent_payload.proto", fileDescriptor_bddccf659a49dc88) } - -var fileDescriptor_bddccf659a49dc88 = []byte{ - // 306 bytes of a gzipped FileDescriptorProto - 
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0xbd, 0x4e, 0xf3, 0x40, - 0x10, 0xcc, 0x39, 0x5f, 0xf2, 0xc5, 0x4b, 0x84, 0xe0, 0x48, 0x71, 0xb2, 0x90, 0x65, 0xa5, 0x72, - 0xe5, 0x02, 0x0a, 0x7e, 0x3a, 0x90, 0xd2, 0xa2, 0xc8, 0x89, 0x68, 0xd1, 0x9a, 0xac, 0x0c, 0xc2, - 0xf1, 0x59, 0xeb, 0x23, 0x52, 0xde, 0x82, 0xc7, 0xa2, 0x4c, 0x49, 0x89, 0x92, 0x9e, 0x67, 0x40, - 0x77, 0x89, 0x42, 0x82, 0xe8, 0x76, 0x66, 0x76, 0x4e, 0xb3, 0x73, 0x70, 0x82, 0x39, 0x95, 0xe6, - 0xa1, 0xc2, 0x79, 0xa1, 0x71, 0x92, 0x54, 0xac, 0x8d, 0x96, 0x5e, 0x95, 0x05, 0x3d, 0xc3, 0xf8, - 0x48, 0xbc, 0xaf, 0xf4, 0xbf, 0x3c, 0xe8, 0xde, 0x58, 0xc7, 0x70, 0x4d, 0xcb, 0x00, 0x3a, 0x4f, - 0xba, 0x36, 0x77, 0x38, 0x25, 0x25, 0x22, 0x11, 0xfb, 0xe9, 0x16, 0xcb, 0x23, 0x68, 0x52, 0x39, - 0x53, 0x9e, 0xa3, 0xed, 0x28, 0xaf, 0xe0, 0x70, 0xfd, 0xec, 0xc6, 0x5e, 0xab, 0x56, 0xd4, 0x8c, - 0x0f, 0xce, 0x8e, 0x93, 0x2a, 0x4b, 0xc6, 0xbb, 0x4a, 0xfa, 0x6b, 0x51, 0x26, 0xf0, 0xcf, 0x60, - 0x5e, 0xab, 0xb6, 0x33, 0x04, 0xd6, 0xb0, 0x1b, 0x24, 0x19, 0x63, 0x5e, 0x0f, 0x4a, 0xc3, 0xf3, - 0xd4, 0xed, 0xc9, 0x3e, 0x74, 0xdd, 0x69, 0xf7, 0xc4, 0xf5, 0xb3, 0x2e, 0xd5, 0x7f, 0x97, 0x62, - 0x8f, 0x93, 0xa7, 0xe0, 0x1b, 0xe4, 0x9c, 0xcc, 0x78, 0x38, 0x52, 0x9d, 0x48, 0xc4, 0x22, 0xfd, - 0x21, 0xec, 0x69, 0xc4, 0xac, 0xd9, 0x8a, 0xbe, 0x13, 0xb7, 0x58, 0x26, 0x20, 0x19, 0x99, 0x46, - 0x38, 0xad, 0x0a, 0xe2, 0x41, 0x89, 0x59, 0x41, 0x13, 0x05, 0x91, 0x88, 0x3b, 0xe9, 0x1f, 0x4a, - 0x70, 0x01, 0xfe, 0x36, 0xa0, 0xed, 0xe5, 0x85, 0xe6, 0x9b, 0xba, 0xec, 0x28, 0x7b, 0xd0, 0x9a, - 0x61, 0xf1, 0x4a, 0x9b, 0xae, 0xd6, 0xe0, 0xda, 0xbb, 0x14, 0xb7, 0xea, 0x7d, 0x19, 0x8a, 0xc5, - 0x32, 0x14, 0x9f, 0xcb, 0x50, 0xbc, 0xad, 0xc2, 0xc6, 0x62, 0x15, 0x36, 0x3e, 0x56, 0x61, 0x23, - 0x6b, 0xbb, 0x1f, 0x39, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xf9, 0xd4, 0x6f, 0xc2, 0x01, - 0x00, 0x00, -} - -func (m *AgentPayload) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AgentPayload) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.HostName) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAgentPayload(dAtA, i, uint64(len(m.HostName))) - i += copy(dAtA[i:], m.HostName) - } - if len(m.Env) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintAgentPayload(dAtA, i, uint64(len(m.Env))) - i += copy(dAtA[i:], m.Env) - } - if len(m.TracerPayloads) > 0 { - for _, msg := range m.TracerPayloads { - dAtA[i] = 0x2a - i++ - i = encodeVarintAgentPayload(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Tags) > 0 { - for k, _ := range m.Tags { - dAtA[i] = 0x32 - i++ - v := m.Tags[k] - mapSize := 1 + len(k) + sovAgentPayload(uint64(len(k))) + 1 + len(v) + sovAgentPayload(uint64(len(v))) - i = encodeVarintAgentPayload(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintAgentPayload(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintAgentPayload(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.AgentVersion) > 0 { - dAtA[i] = 0x3a - i++ - i = encodeVarintAgentPayload(dAtA, i, uint64(len(m.AgentVersion))) - i += copy(dAtA[i:], m.AgentVersion) - } - if m.TargetTPS != 0 { - dAtA[i] = 0x41 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.TargetTPS)))) - i += 8 - } - if m.ErrorTPS != 0 { - dAtA[i] = 0x49 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ErrorTPS)))) - i += 8 - } - if m.RareSamplerEnabled { - dAtA[i] = 0x50 - i++ - if m.RareSamplerEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func encodeVarintAgentPayload(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = 
uint8(v) - return offset + 1 -} -func (m *AgentPayload) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.HostName) - if l > 0 { - n += 1 + l + sovAgentPayload(uint64(l)) - } - l = len(m.Env) - if l > 0 { - n += 1 + l + sovAgentPayload(uint64(l)) - } - if len(m.TracerPayloads) > 0 { - for _, e := range m.TracerPayloads { - l = e.Size() - n += 1 + l + sovAgentPayload(uint64(l)) - } - } - if len(m.Tags) > 0 { - for k, v := range m.Tags { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovAgentPayload(uint64(len(k))) + 1 + len(v) + sovAgentPayload(uint64(len(v))) - n += mapEntrySize + 1 + sovAgentPayload(uint64(mapEntrySize)) - } - } - l = len(m.AgentVersion) - if l > 0 { - n += 1 + l + sovAgentPayload(uint64(l)) - } - if m.TargetTPS != 0 { - n += 9 - } - if m.ErrorTPS != 0 { - n += 9 - } - if m.RareSamplerEnabled { - n += 2 - } - return n +var File_agent_payload_proto protoreflect.FileDescriptor + +var file_agent_payload_proto_rawDesc = []byte{ + 0x0a, 0x13, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x14, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x72, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xee, 0x02, 0x0a, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x76, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x39, + 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, + 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x72, 0x50, 0x61, 0x79, 0x6c, 
0x6f, 0x61, 0x64, 0x73, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x61, 0x67, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x67, 0x65, + 0x6e, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, + 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53, 0x12, 0x1a, 0x0a, 0x08, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x54, 0x50, 0x53, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x54, 0x50, 0x53, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x61, 0x72, 0x65, 0x53, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x61, 0x72, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, + 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, + 0x61, 0x74, 0x61, 0x44, 0x6f, 0x67, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2d, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x70, + 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func sovAgentPayload(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} 
-func sozAgentPayload(x uint64) (n int) { - return sovAgentPayload(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *AgentPayload) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AgentPayload: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AgentPayload: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAgentPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAgentPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAgentPayload - } - postIndex := 
iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAgentPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TracerPayloads", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAgentPayload - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAgentPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TracerPayloads = append(m.TracerPayloads, &TracerPayload{}) - if err := m.TracerPayloads[len(m.TracerPayloads)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAgentPayload - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAgentPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tags == nil { - m.Tags = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := 
int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthAgentPayload - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthAgentPayload - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthAgentPayload - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthAgentPayload - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipAgentPayload(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAgentPayload - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Tags[mapkey] = mapvalue - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowAgentPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAgentPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAgentPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AgentVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetTPS", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.TargetTPS = float64(math.Float64frombits(v)) - case 9: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorTPS", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.ErrorTPS = float64(math.Float64frombits(v)) - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RareSamplerEnabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RareSamplerEnabled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipAgentPayload(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAgentPayload - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthAgentPayload - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } +var ( + file_agent_payload_proto_rawDescOnce sync.Once + file_agent_payload_proto_rawDescData = file_agent_payload_proto_rawDesc 
+) - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAgentPayload(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAgentPayload - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAgentPayload - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } +func file_agent_payload_proto_rawDescGZIP() []byte { + file_agent_payload_proto_rawDescOnce.Do(func() { + file_agent_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_agent_payload_proto_rawDescData) + }) + return file_agent_payload_proto_rawDescData +} + +var file_agent_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_agent_payload_proto_goTypes = []interface{}{ + (*AgentPayload)(nil), // 0: pb.AgentPayload + nil, // 1: pb.AgentPayload.TagsEntry + (*TracerPayload)(nil), // 2: pb.TracerPayload +} +var file_agent_payload_proto_depIdxs = []int32{ + 2, // 0: pb.AgentPayload.tracerPayloads:type_name -> pb.TracerPayload + 1, // 1: pb.AgentPayload.tags:type_name -> pb.AgentPayload.TagsEntry + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_agent_payload_proto_init() } +func file_agent_payload_proto_init() { + if File_agent_payload_proto != nil { + return + } + file_tracer_payload_proto_init() + if !protoimpl.UnsafeEnabled { + file_agent_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { 
+ switch v := v.(*AgentPayload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAgentPayload - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAgentPayload - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthAgentPayload - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAgentPayload - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipAgentPayload(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthAgentPayload - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } - panic("unreachable") + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_agent_payload_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_agent_payload_proto_goTypes, + DependencyIndexes: file_agent_payload_proto_depIdxs, + MessageInfos: file_agent_payload_proto_msgTypes, + }.Build() + File_agent_payload_proto = out.File + file_agent_payload_proto_rawDesc = nil + file_agent_payload_proto_goTypes = nil + 
file_agent_payload_proto_depIdxs = nil } - -var ( - ErrInvalidLengthAgentPayload = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAgentPayload = fmt.Errorf("proto: integer overflow") -) diff --git a/pkg/trace/pb/agent_payload.proto b/pkg/trace/pb/agent_payload.proto index c99c4421f44e3..3d0f4f2471f94 100644 --- a/pkg/trace/pb/agent_payload.proto +++ b/pkg/trace/pb/agent_payload.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package pb; +option go_package = "github.com/DataDog/datadog-agent/pkg/trace/pb"; import "tracer_payload.proto"; diff --git a/pkg/trace/pb/agent_payload_vtproto.pb.go b/pkg/trace/pb/agent_payload_vtproto.pb.go new file mode 100644 index 0000000000000..36ca03fe83430 --- /dev/null +++ b/pkg/trace/pb/agent_payload_vtproto.pb.go @@ -0,0 +1,523 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.4.0 +// source: agent_payload.proto + +package pb + +import ( + binary "encoding/binary" + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *AgentPayload) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AgentPayload) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AgentPayload) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RareSamplerEnabled { + i-- + if m.RareSamplerEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.ErrorTPS != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ErrorTPS)))) + i-- + dAtA[i] = 0x49 + } + if m.TargetTPS != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.TargetTPS)))) + i-- + dAtA[i] = 0x41 + } + if len(m.AgentVersion) > 0 { + i -= len(m.AgentVersion) + copy(dAtA[i:], m.AgentVersion) + i = encodeVarint(dAtA, i, uint64(len(m.AgentVersion))) + i-- + dAtA[i] = 0x3a + } + if len(m.Tags) > 0 { + for k := range m.Tags { + v := m.Tags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.TracerPayloads) > 0 { + for iNdEx := len(m.TracerPayloads) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TracerPayloads[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Env) > 0 { + i 
-= len(m.Env) + copy(dAtA[i:], m.Env) + i = encodeVarint(dAtA, i, uint64(len(m.Env))) + i-- + dAtA[i] = 0x12 + } + if len(m.HostName) > 0 { + i -= len(m.HostName) + copy(dAtA[i:], m.HostName) + i = encodeVarint(dAtA, i, uint64(len(m.HostName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AgentPayload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Env) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.TracerPayloads) > 0 { + for _, e := range m.TracerPayloads { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Tags) > 0 { + for k, v := range m.Tags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + l = len(m.AgentVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TargetTPS != 0 { + n += 9 + } + if m.ErrorTPS != 0 { + n += 9 + } + if m.RareSamplerEnabled { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *AgentPayload) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AgentPayload: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AgentPayload: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TracerPayloads", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TracerPayloads = append(m.TracerPayloads, &TracerPayload{}) + if err := m.TracerPayloads[len(m.TracerPayloads)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tags == nil { + m.Tags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if 
postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Tags[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetTPS", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.TargetTPS = float64(math.Float64frombits(v)) + case 9: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorTPS", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ErrorTPS = float64(math.Float64frombits(v)) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RareSamplerEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RareSamplerEnabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/pkg/trace/pb/generate.sh b/pkg/trace/pb/generate.sh new file mode 100755 index 0000000000000..ed0d1388cfec0 --- /dev/null +++ b/pkg/trace/pb/generate.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + + +protoc -I. --go_out=paths=source_relative:. --go-vtproto_out=paths=source_relative:. --go-vtproto_opt=features=marshal+unmarshal+size span.proto tracer_payload.proto agent_payload.proto stats.proto +protoc-go-inject-tag -input=span.pb.go +protoc-go-inject-tag -input=tracer_payload.pb.go +protoc-go-inject-tag -input=agent_payload.pb.go + diff --git a/pkg/trace/pb/span.pb.go b/pkg/trace/pb/span.pb.go index 6e844c33d41eb..0d9607e45d24d 100644 --- a/pkg/trace/pb/span.pb.go +++ b/pkg/trace/pb/span.pb.go @@ -1,1214 +1,305 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 // source: span.proto package pb import ( - encoding_binary "encoding/binary" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Span struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // service is the name of the service with which this span is associated. + // @gotags: json:"service" msg:"service" Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service" msg:"service"` // name is the operation name of this span. + // @gotags: json:"name" msg:"name" Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name" msg:"name"` // resource is the resource name of this span, also sometimes called the endpoint (for web spans). + // @gotags: json:"resource" msg:"resource" Resource string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource" msg:"resource"` // traceID is the ID of the trace to which this span belongs. + // @gotags: json:"trace_id" msg:"trace_id" TraceID uint64 `protobuf:"varint,4,opt,name=traceID,proto3" json:"trace_id" msg:"trace_id"` // spanID is the ID of this span. + // @gotags: json:"span_id" msg:"span_id" SpanID uint64 `protobuf:"varint,5,opt,name=spanID,proto3" json:"span_id" msg:"span_id"` // parentID is the ID of this span's parent, or zero if this span has no parent. 
+ // @gotags: json:"parent_id" msg:"parent_id" ParentID uint64 `protobuf:"varint,6,opt,name=parentID,proto3" json:"parent_id" msg:"parent_id"` // start is the number of nanoseconds between the Unix epoch and the beginning of this span. + // @gotags: json:"start" msg:"start" Start int64 `protobuf:"varint,7,opt,name=start,proto3" json:"start" msg:"start"` // duration is the time length of this span in nanoseconds. + // @gotags: json:"duration" msg:"duration" Duration int64 `protobuf:"varint,8,opt,name=duration,proto3" json:"duration" msg:"duration"` // error is 1 if there is an error associated with this span, or 0 if there is not. + // @gotags: json:"error" msg:"error" Error int32 `protobuf:"varint,9,opt,name=error,proto3" json:"error" msg:"error"` // meta is a mapping from tag name to tag value for string-valued tags. - Meta map[string]string `protobuf:"bytes,10,rep,name=meta,proto3" json:"meta" msg:"meta" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // @gotags: json:"meta" msg:"meta" + Meta map[string]string `protobuf:"bytes,10,rep,name=meta,proto3" json:"meta" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"meta"` // metrics is a mapping from tag name to tag value for numeric-valued tags. - Metrics map[string]float64 `protobuf:"bytes,11,rep,name=metrics,proto3" json:"metrics" msg:"metrics" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` + // @gotags: json:"metrics" msg:"metrics" + Metrics map[string]float64 `protobuf:"bytes,11,rep,name=metrics,proto3" json:"metrics" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3" msg:"metrics"` // type is the type of the service with which this span is associated. Example values: web, db, lambda. 
+ // @gotags: json:"type" msg:"type" Type string `protobuf:"bytes,12,opt,name=type,proto3" json:"type" msg:"type"` // meta_struct is a registry of structured "other" data used by, e.g., AppSec. - MetaStruct map[string][]byte `protobuf:"bytes,13,rep,name=meta_struct,json=metaStruct,proto3" json:"meta_struct,omitempty" msg:"meta_struct" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // @gotags: json:"meta_struct,omitempty" msg:"meta_struct" + MetaStruct map[string][]byte `protobuf:"bytes,13,rep,name=meta_struct,json=metaStruct,proto3" json:"meta_struct,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"meta_struct"` } -func (m *Span) Reset() { *m = Span{} } -func (m *Span) String() string { return proto.CompactTextString(m) } -func (*Span) ProtoMessage() {} -func (*Span) Descriptor() ([]byte, []int) { - return fileDescriptor_fc5f2b88b579999f, []int{0} +func (x *Span) Reset() { + *x = Span{} + if protoimpl.UnsafeEnabled { + mi := &file_span_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Span) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + +func (x *Span) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Span.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err + +func (*Span) ProtoMessage() {} + +func (x *Span) ProtoReflect() protoreflect.Message { + mi := &file_span_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *Span) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span.Merge(m, src) -} -func (m *Span) XXX_Size() int { 
- return m.Size() -} -func (m *Span) XXX_DiscardUnknown() { - xxx_messageInfo_Span.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_Span proto.InternalMessageInfo +// Deprecated: Use Span.ProtoReflect.Descriptor instead. +func (*Span) Descriptor() ([]byte, []int) { + return file_span_proto_rawDescGZIP(), []int{0} +} -func (m *Span) GetService() string { - if m != nil { - return m.Service +func (x *Span) GetService() string { + if x != nil { + return x.Service } return "" } -func (m *Span) GetName() string { - if m != nil { - return m.Name +func (x *Span) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *Span) GetResource() string { - if m != nil { - return m.Resource +func (x *Span) GetResource() string { + if x != nil { + return x.Resource } return "" } -func (m *Span) GetTraceID() uint64 { - if m != nil { - return m.TraceID +func (x *Span) GetTraceID() uint64 { + if x != nil { + return x.TraceID } return 0 } -func (m *Span) GetSpanID() uint64 { - if m != nil { - return m.SpanID +func (x *Span) GetSpanID() uint64 { + if x != nil { + return x.SpanID } return 0 } -func (m *Span) GetParentID() uint64 { - if m != nil { - return m.ParentID +func (x *Span) GetParentID() uint64 { + if x != nil { + return x.ParentID } return 0 } -func (m *Span) GetStart() int64 { - if m != nil { - return m.Start +func (x *Span) GetStart() int64 { + if x != nil { + return x.Start } return 0 } -func (m *Span) GetDuration() int64 { - if m != nil { - return m.Duration +func (x *Span) GetDuration() int64 { + if x != nil { + return x.Duration } return 0 } -func (m *Span) GetError() int32 { - if m != nil { - return m.Error +func (x *Span) GetError() int32 { + if x != nil { + return x.Error } return 0 } -func (m *Span) GetMeta() map[string]string { - if m != nil { - return m.Meta +func (x *Span) GetMeta() map[string]string { + if x != nil { + return x.Meta } return nil } -func (m *Span) GetMetrics() map[string]float64 { - if m != nil { - return m.Metrics 
+func (x *Span) GetMetrics() map[string]float64 { + if x != nil { + return x.Metrics } return nil } -func (m *Span) GetType() string { - if m != nil { - return m.Type +func (x *Span) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *Span) GetMetaStruct() map[string][]byte { - if m != nil { - return m.MetaStruct +func (x *Span) GetMetaStruct() map[string][]byte { + if x != nil { + return x.MetaStruct } return nil } -func init() { - proto.RegisterType((*Span)(nil), "pb.Span") - proto.RegisterMapType((map[string]string)(nil), "pb.Span.MetaEntry") - proto.RegisterMapType((map[string][]byte)(nil), "pb.Span.MetaStructEntry") - proto.RegisterMapType((map[string]float64)(nil), "pb.Span.MetricsEntry") -} +var File_span_proto protoreflect.FileDescriptor -func init() { proto.RegisterFile("span.proto", fileDescriptor_fc5f2b88b579999f) } - -var fileDescriptor_fc5f2b88b579999f = []byte{ - // 552 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0xe3, 0x7c, 0x67, 0x93, 0x52, 0x58, 0x51, 0xb4, 0x8a, 0x90, 0x6d, 0xed, 0x29, 0x42, - 0xe0, 0x4a, 0x80, 0xa0, 0x8a, 0x80, 0x43, 0x14, 0x0e, 0x39, 0xf4, 0xb2, 0x7d, 0x80, 0xca, 0x71, - 0x97, 0x60, 0x81, 0x3f, 0xb4, 0x5e, 0x57, 0xca, 0x5b, 0xf0, 0x58, 0x88, 0x53, 0x8f, 0x9c, 0x56, - 0x28, 0xb9, 0xf9, 0x98, 0x27, 0x40, 0x3b, 0x6b, 0x2f, 0xa1, 0x97, 0xde, 0xfc, 0xff, 0xcf, 0xfc, - 0x66, 0x3c, 0xe3, 0x31, 0x42, 0x45, 0x1e, 0xa6, 0x41, 0x2e, 0x32, 0x99, 0xe1, 0x76, 0xbe, 0x9e, - 0xbe, 0xda, 0xc4, 0xf2, 0x6b, 0xb9, 0x0e, 0xa2, 0x2c, 0x39, 0xdf, 0x64, 0x9b, 0xec, 0x1c, 0x42, - 0xeb, 0xf2, 0x0b, 0x28, 0x10, 0xf0, 0x64, 0x10, 0xfa, 0x6b, 0x80, 0xba, 0x57, 0x79, 0x98, 0xe2, - 0x77, 0x68, 0x50, 0x70, 0x71, 0x1b, 0x47, 0x9c, 0x38, 0xbe, 0x33, 0x1b, 0x2d, 0x9e, 0x57, 0xca, - 0x6b, 0xac, 0x83, 0xf2, 0x4e, 0x92, 0x62, 0x33, 0xa7, 0xb5, 0xa6, 0xac, 0x89, 0xe0, 0x17, 0xa8, - 0x9b, 0x86, 0x09, 0x27, 0x6d, 0x80, 0x9e, 0x55, 0xca, 
0x03, 0x7d, 0x50, 0x1e, 0x02, 0x42, 0x0b, - 0xca, 0xc0, 0xc3, 0x73, 0x34, 0x14, 0xbc, 0xc8, 0x4a, 0x11, 0x71, 0xd2, 0x81, 0x7c, 0xb7, 0x52, - 0x9e, 0xf5, 0x0e, 0xca, 0x7b, 0x04, 0x4c, 0x63, 0x50, 0x66, 0x63, 0xf8, 0x02, 0x0d, 0xa4, 0x08, - 0x23, 0xbe, 0x5a, 0x92, 0xae, 0xef, 0xcc, 0xba, 0x06, 0x05, 0xeb, 0x3a, 0xbe, 0xb1, 0x68, 0x63, - 0x50, 0xd6, 0xa4, 0xe3, 0xb7, 0xa8, 0xaf, 0x77, 0xb4, 0x5a, 0x92, 0x1e, 0x80, 0x66, 0xb0, 0x3c, - 0x4c, 0x0d, 0x57, 0x0f, 0x66, 0x34, 0x65, 0x75, 0x2e, 0xfe, 0x80, 0x86, 0x79, 0x28, 0x78, 0x2a, - 0x57, 0x4b, 0xd2, 0x07, 0xce, 0xaf, 0x94, 0x37, 0x32, 0x9e, 0x21, 0x4f, 0x81, 0xb4, 0x0e, 0x65, - 0x96, 0xc0, 0x01, 0xea, 0x15, 0x32, 0x14, 0x92, 0x0c, 0x7c, 0x67, 0xd6, 0x59, 0x90, 0x4a, 0x79, - 0xc6, 0x38, 0x28, 0x6f, 0x6c, 0x1a, 0x6a, 0x45, 0x99, 0x71, 0xf5, 0x66, 0x6e, 0x4a, 0x11, 0xca, - 0x38, 0x4b, 0xc9, 0x10, 0x10, 0x18, 0xaf, 0xf1, 0xec, 0x78, 0x8d, 0x41, 0x99, 0x8d, 0xe9, 0x5e, - 0x5c, 0x88, 0x4c, 0x90, 0x91, 0xef, 0xcc, 0x7a, 0xa6, 0x17, 0x18, 0xb6, 0x17, 0x28, 0xca, 0x8c, - 0x8b, 0x3f, 0xa1, 0x6e, 0xc2, 0x65, 0x48, 0x90, 0xdf, 0x99, 0x8d, 0x5f, 0xe3, 0x20, 0x5f, 0x07, - 0xfa, 0x02, 0x82, 0x4b, 0x2e, 0xc3, 0xcf, 0xa9, 0x14, 0x5b, 0xf3, 0x15, 0x75, 0x8e, 0xfd, 0x8a, - 0x5a, 0x50, 0x06, 0x1e, 0xbe, 0x44, 0x83, 0x84, 0x4b, 0x11, 0x47, 0x05, 0x19, 0x43, 0x89, 0xb3, - 0xe3, 0x12, 0xda, 0x37, 0x55, 0x60, 0xcf, 0x75, 0xa6, 0xdd, 0x73, 0xad, 0x29, 0x6b, 0x22, 0xfa, - 0x80, 0xe4, 0x36, 0xe7, 0x64, 0xf2, 0xef, 0x80, 0xb4, 0xb6, 0xad, 0xb5, 0xa0, 0x0c, 0x3c, 0x1c, - 0xa3, 0xb1, 0x7e, 0x85, 0xeb, 0x42, 0x8a, 0x32, 0x92, 0xe4, 0x04, 0xda, 0x93, 0xff, 0x26, 0xb8, - 0x82, 0x90, 0x79, 0x83, 0xa0, 0x52, 0xde, 0xd9, 0x11, 0xf0, 0x32, 0x4b, 0x62, 0xc9, 0x93, 0x5c, - 0x6e, 0x0f, 0xca, 0x7b, 0x62, 0x07, 0xab, 0xa3, 0x94, 0xa1, 0xc4, 0x16, 0x98, 0xbe, 0x47, 0x23, - 0xbb, 0x10, 0xfc, 0x18, 0x75, 0xbe, 0xf1, 0xad, 0xf9, 0x31, 0x98, 0x7e, 0xc4, 0x4f, 0x51, 0xef, - 0x36, 0xfc, 0x5e, 0xd6, 0x77, 0xcf, 0x8c, 0x98, 0xb7, 0x2f, 0x9c, 0xe9, 0x1c, 0x4d, 0x8e, 
0xd7, - 0xf0, 0x10, 0xeb, 0x1c, 0xb3, 0x1f, 0xd1, 0xe9, 0xbd, 0x19, 0x1e, 0xc2, 0x27, 0x47, 0xf8, 0x82, - 0xfc, 0xdc, 0xb9, 0xce, 0xdd, 0xce, 0x75, 0xfe, 0xec, 0x5c, 0xe7, 0xc7, 0xde, 0x6d, 0xdd, 0xed, - 0xdd, 0xd6, 0xef, 0xbd, 0xdb, 0x5a, 0xf7, 0xe1, 0x6f, 0x7f, 0xf3, 0x37, 0x00, 0x00, 0xff, 0xff, - 0xd0, 0x61, 0xcf, 0x3b, 0x2e, 0x04, 0x00, 0x00, -} - -func (m *Span) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil +var file_span_proto_rawDesc = []byte{ + 0x0a, 0x0a, 0x73, 0x70, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, + 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x73, + 0x70, 0x61, 0x6e, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, + 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, + 0x44, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 
0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x26, 0x0a, 0x04, 0x6d, 0x65, 0x74, + 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x70, 0x61, + 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, + 0x61, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x0b, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x73, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, + 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 
0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x44, 0x6f, 0x67, 0x2f, 0x64, 0x61, 0x74, 0x61, + 0x64, 0x6f, 0x67, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (m *Span) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Service) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintSpan(dAtA, i, uint64(len(m.Service))) - i += copy(dAtA[i:], m.Service) - } - if len(m.Name) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintSpan(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Resource) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintSpan(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - } - if m.TraceID != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintSpan(dAtA, i, uint64(m.TraceID)) - } - if m.SpanID != 0 { - dAtA[i] = 0x28 - i++ - i = encodeVarintSpan(dAtA, i, uint64(m.SpanID)) - } - if m.ParentID != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintSpan(dAtA, i, uint64(m.ParentID)) - } - if m.Start != 0 { - dAtA[i] = 0x38 - i++ - i = encodeVarintSpan(dAtA, i, uint64(m.Start)) - } - if m.Duration != 0 { - dAtA[i] = 0x40 - i++ - i = encodeVarintSpan(dAtA, i, uint64(m.Duration)) - } - if m.Error != 0 { - dAtA[i] = 0x48 - i++ - i = encodeVarintSpan(dAtA, i, uint64(m.Error)) - } - if len(m.Meta) > 0 { - for k, _ := range m.Meta { - dAtA[i] = 0x52 - i++ - v := m.Meta[k] - mapSize := 1 + len(k) + sovSpan(uint64(len(k))) + 1 + len(v) + sovSpan(uint64(len(v))) - i = encodeVarintSpan(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintSpan(dAtA, i, 
uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintSpan(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.Metrics) > 0 { - for k, _ := range m.Metrics { - dAtA[i] = 0x5a - i++ - v := m.Metrics[k] - mapSize := 1 + len(k) + sovSpan(uint64(len(k))) + 1 + 8 - i = encodeVarintSpan(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintSpan(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x11 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(v)))) - i += 8 - } - } - if len(m.Type) > 0 { - dAtA[i] = 0x62 - i++ - i = encodeVarintSpan(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - } - if len(m.MetaStruct) > 0 { - for k, _ := range m.MetaStruct { - dAtA[i] = 0x6a - i++ - v := m.MetaStruct[k] - byteSize := 0 - if len(v) > 0 { - byteSize = 1 + len(v) + sovSpan(uint64(len(v))) - } - mapSize := 1 + len(k) + sovSpan(uint64(len(k))) + byteSize - i = encodeVarintSpan(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintSpan(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - if len(v) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintSpan(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - } - return i, nil -} +var ( + file_span_proto_rawDescOnce sync.Once + file_span_proto_rawDescData = file_span_proto_rawDesc +) -func encodeVarintSpan(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Span) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Service) - if l > 0 { - n += 1 + l + sovSpan(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovSpan(uint64(l)) - } - l = len(m.Resource) - if l > 0 { - n += 1 + l + sovSpan(uint64(l)) - } - if m.TraceID != 0 { - n += 1 + sovSpan(uint64(m.TraceID)) - } - if m.SpanID != 0 { - n += 1 + sovSpan(uint64(m.SpanID)) - } - if m.ParentID 
!= 0 { - n += 1 + sovSpan(uint64(m.ParentID)) - } - if m.Start != 0 { - n += 1 + sovSpan(uint64(m.Start)) - } - if m.Duration != 0 { - n += 1 + sovSpan(uint64(m.Duration)) - } - if m.Error != 0 { - n += 1 + sovSpan(uint64(m.Error)) - } - if len(m.Meta) > 0 { - for k, v := range m.Meta { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovSpan(uint64(len(k))) + 1 + len(v) + sovSpan(uint64(len(v))) - n += mapEntrySize + 1 + sovSpan(uint64(mapEntrySize)) - } - } - if len(m.Metrics) > 0 { - for k, v := range m.Metrics { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovSpan(uint64(len(k))) + 1 + 8 - n += mapEntrySize + 1 + sovSpan(uint64(mapEntrySize)) - } - } - l = len(m.Type) - if l > 0 { - n += 1 + l + sovSpan(uint64(l)) - } - if len(m.MetaStruct) > 0 { - for k, v := range m.MetaStruct { - _ = k - _ = v - l = 0 - if len(v) > 0 { - l = 1 + len(v) + sovSpan(uint64(len(v))) - } - mapEntrySize := 1 + len(k) + sovSpan(uint64(len(k))) + l - n += mapEntrySize + 1 + sovSpan(uint64(mapEntrySize)) - } - } - return n +func file_span_proto_rawDescGZIP() []byte { + file_span_proto_rawDescOnce.Do(func() { + file_span_proto_rawDescData = protoimpl.X.CompressGZIP(file_span_proto_rawDescData) + }) + return file_span_proto_rawDescData } -func sovSpan(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n +var file_span_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_span_proto_goTypes = []interface{}{ + (*Span)(nil), // 0: pb.Span + nil, // 1: pb.Span.MetaEntry + nil, // 2: pb.Span.MetricsEntry + nil, // 3: pb.Span.MetaStructEntry } -func sozSpan(x uint64) (n int) { - return sovSpan(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +var file_span_proto_depIdxs = []int32{ + 1, // 0: pb.Span.meta:type_name -> pb.Span.MetaEntry + 2, // 1: pb.Span.metrics:type_name -> pb.Span.MetricsEntry + 3, // 2: pb.Span.meta_struct:type_name -> pb.Span.MetaStructEntry + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method 
input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } -func (m *Span) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Span: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Span: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSpan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSpan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Service = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSpan - } - postIndex := iNdEx + intStringLen 
- if postIndex < 0 { - return ErrInvalidLengthSpan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSpan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSpan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType) - } - m.TraceID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TraceID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanID", wireType) - } - m.SpanID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SpanID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentID", wireType) - } - m.ParentID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ParentID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) - } - m.Start = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Start |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) - } - m.Duration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Duration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - m.Error = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Error |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSpan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSpan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Meta == nil { - m.Meta = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 
0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSpan - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthSpan - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSpan - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthSpan - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipSpan(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSpan - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Meta[mapkey] = mapvalue - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSpan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSpan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metrics == nil { - m.Metrics = make(map[string]float64) - } - var mapkey string - var mapvalue float64 - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSpan - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthSpan - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapvaluetemp uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - mapvaluetemp = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - mapvalue = math.Float64frombits(mapvaluetemp) - } else { - iNdEx = entryPreIndex - skippy, err := skipSpan(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSpan - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Metrics[mapkey] = mapvalue - iNdEx = postIndex - case 12: - 
if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSpan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSpan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetaStruct", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSpan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSpan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MetaStruct == nil { - m.MetaStruct = make(map[string][]byte) - } - var mapkey string - mapvalue := []byte{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := 
int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSpan - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthSpan - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthSpan - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex < 0 { - return ErrInvalidLengthSpan - } - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipSpan(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSpan - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.MetaStruct[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSpan(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSpan - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthSpan - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSpan(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSpan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= 
(uint64(b) & 0x7F) << shift - if b < 0x80 { - break +func init() { file_span_proto_init() } +func file_span_proto_init() { + if File_span_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_span_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSpan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSpan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSpan - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthSpan - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSpan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipSpan(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthSpan - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } } - panic("unreachable") + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_span_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_span_proto_goTypes, + DependencyIndexes: file_span_proto_depIdxs, + MessageInfos: file_span_proto_msgTypes, + }.Build() + File_span_proto = out.File + file_span_proto_rawDesc = nil + file_span_proto_goTypes = nil + file_span_proto_depIdxs = nil } - -var ( - ErrInvalidLengthSpan = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSpan = fmt.Errorf("proto: integer overflow") -) diff --git a/pkg/trace/pb/span.proto b/pkg/trace/pb/span.proto index 0165ed851bd17..de10f5fa83d59 100644 --- a/pkg/trace/pb/span.proto +++ b/pkg/trace/pb/span.proto @@ -1,34 +1,46 @@ syntax = "proto3"; package pb; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option go_package="github.com/DataDog/datadog-agent/pkg/trace/pb"; message Span { // service is the name of the service with which this span is associated. - string service = 1 [(gogoproto.jsontag) = "service", (gogoproto.moretags) = "msg:\"service\""]; + // @gotags: json:"service" msg:"service" + string service = 1; // name is the operation name of this span. - string name = 2 [(gogoproto.jsontag) = "name", (gogoproto.moretags) = "msg:\"name\""]; + // @gotags: json:"name" msg:"name" + string name = 2; // resource is the resource name of this span, also sometimes called the endpoint (for web spans). - string resource = 3 [(gogoproto.jsontag) = "resource", (gogoproto.moretags) = "msg:\"resource\""]; + // @gotags: json:"resource" msg:"resource" + string resource = 3; // traceID is the ID of the trace to which this span belongs. - uint64 traceID = 4 [(gogoproto.jsontag) = "trace_id", (gogoproto.moretags) = "msg:\"trace_id\""]; + // @gotags: json:"trace_id" msg:"trace_id" + uint64 traceID = 4; // spanID is the ID of this span. 
- uint64 spanID = 5 [(gogoproto.jsontag) = "span_id", (gogoproto.moretags) = "msg:\"span_id\""]; + // @gotags: json:"span_id" msg:"span_id" + uint64 spanID = 5; // parentID is the ID of this span's parent, or zero if this span has no parent. - uint64 parentID = 6 [(gogoproto.jsontag) = "parent_id", (gogoproto.moretags) = "msg:\"parent_id\""]; + // @gotags: json:"parent_id" msg:"parent_id" + uint64 parentID = 6; // start is the number of nanoseconds between the Unix epoch and the beginning of this span. - int64 start = 7 [(gogoproto.jsontag) = "start", (gogoproto.moretags) = "msg:\"start\""]; + // @gotags: json:"start" msg:"start" + int64 start = 7; // duration is the time length of this span in nanoseconds. - int64 duration = 8 [(gogoproto.jsontag) = "duration", (gogoproto.moretags) = "msg:\"duration\""]; + // @gotags: json:"duration" msg:"duration" + int64 duration = 8; // error is 1 if there is an error associated with this span, or 0 if there is not. - int32 error = 9 [(gogoproto.jsontag) = "error", (gogoproto.moretags) = "msg:\"error\""]; + // @gotags: json:"error" msg:"error" + int32 error = 9; // meta is a mapping from tag name to tag value for string-valued tags. - map meta = 10 [(gogoproto.jsontag) = "meta", (gogoproto.moretags) = "msg:\"meta\""]; + // @gotags: json:"meta" msg:"meta" + map meta = 10; // metrics is a mapping from tag name to tag value for numeric-valued tags. - map metrics = 11 [(gogoproto.jsontag) = "metrics", (gogoproto.moretags) = "msg:\"metrics\""]; + // @gotags: json:"metrics" msg:"metrics" + map metrics = 11; // type is the type of the service with which this span is associated. Example values: web, db, lambda. - string type = 12 [(gogoproto.jsontag) = "type", (gogoproto.moretags) = "msg:\"type\""]; + // @gotags: json:"type" msg:"type" + string type = 12; // meta_struct is a registry of structured "other" data used by, e.g., AppSec. 
- map meta_struct = 13 [(gogoproto.jsontag) = "meta_struct,omitempty", (gogoproto.moretags) = "msg:\"meta_struct\""]; + // @gotags: json:"meta_struct,omitempty" msg:"meta_struct" + map meta_struct = 13; } diff --git a/pkg/trace/pb/span_utils.go b/pkg/trace/pb/span_utils.go new file mode 100644 index 0000000000000..e8da5dea22508 --- /dev/null +++ b/pkg/trace/pb/span_utils.go @@ -0,0 +1,51 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package pb + +// spanCopiedFields records the fields that are copied in ShallowCopy. +// This should match exactly the fields set in (*Span).ShallowCopy. +// This is used by tests to enforce the correctness of ShallowCopy. +var spanCopiedFields = map[string]struct{}{ + "Service": {}, + "Name": {}, + "Resource": {}, + "TraceID": {}, + "SpanID": {}, + "ParentID": {}, + "Start": {}, + "Duration": {}, + "Error": {}, + "Meta": {}, + "Metrics": {}, + "Type": {}, + "MetaStruct": {}, +} + +// ShallowCopy returns a shallow copy of the copy-able portion of a Span. These are the +// public fields which will have a Get* method for them. The completeness of this +// method is enforced by the init function above. Instead of using pkg/proto/utils.ProtoCopier, +// which incurs heavy reflection cost for every copy at runtime, we use reflection once at +// startup to ensure our method is complete. 
+func (s *Span) ShallowCopy() *Span { + if s == nil { + return &Span{} + } + return &Span{ + Service: s.Service, + Name: s.Name, + Resource: s.Resource, + TraceID: s.TraceID, + SpanID: s.SpanID, + ParentID: s.ParentID, + Start: s.Start, + Duration: s.Duration, + Error: s.Error, + Meta: s.Meta, + Metrics: s.Metrics, + Type: s.Type, + MetaStruct: s.MetaStruct, + } +} diff --git a/pkg/trace/pb/span_vtproto.pb.go b/pkg/trace/pb/span_vtproto.pb.go new file mode 100644 index 0000000000000..9614470ba845c --- /dev/null +++ b/pkg/trace/pb/span_vtproto.pb.go @@ -0,0 +1,994 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.4.0 +// source: span.proto + +package pb + +import ( + binary "encoding/binary" + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + math "math" + bits "math/bits" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Span) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Span) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Span) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.MetaStruct) > 0 { + for k := range m.MetaStruct { + v := m.MetaStruct[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x62 + } + if len(m.Metrics) > 0 { + for k := range m.Metrics { + v := m.Metrics[k] + baseI := i + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(v)))) + i-- + dAtA[i] = 0x11 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a + } + } + if len(m.Meta) > 0 { + for k := range m.Meta { + v := m.Meta[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if m.Error != 0 { + i = encodeVarint(dAtA, i, 
uint64(m.Error)) + i-- + dAtA[i] = 0x48 + } + if m.Duration != 0 { + i = encodeVarint(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x40 + } + if m.Start != 0 { + i = encodeVarint(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x38 + } + if m.ParentID != 0 { + i = encodeVarint(dAtA, i, uint64(m.ParentID)) + i-- + dAtA[i] = 0x30 + } + if m.SpanID != 0 { + i = encodeVarint(dAtA, i, uint64(m.SpanID)) + i-- + dAtA[i] = 0x28 + } + if m.TraceID != 0 { + i = encodeVarint(dAtA, i, uint64(m.TraceID)) + i-- + dAtA[i] = 0x20 + } + if len(m.Resource) > 0 { + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarint(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Service) > 0 { + i -= len(m.Service) + copy(dAtA[i:], m.Service) + i = encodeVarint(dAtA, i, uint64(len(m.Service))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Span) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Service) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Resource) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TraceID != 0 { + n += 1 + sov(uint64(m.TraceID)) + } + if m.SpanID != 0 { + n += 1 + sov(uint64(m.SpanID)) + } + if m.ParentID != 0 { + n += 1 + sov(uint64(m.ParentID)) + } + if m.Start != 0 { + n += 1 + sov(uint64(m.Start)) + } + if m.Duration != 0 { + n += 1 + sov(uint64(m.Duration)) + } + if m.Error != 0 { + n += 1 + sov(uint64(m.Error)) + } + if len(m.Meta) > 0 { + for k, v := range m.Meta { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + 
len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + if len(m.Metrics) > 0 { + for k, v := range m.Metrics { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + 8 + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.MetaStruct) > 0 { + for k, v := range m.MetaStruct { + _ = k + _ = v + l = 1 + len(v) + sov(uint64(len(v))) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Span) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Span: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Span: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType) + } + m.TraceID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TraceID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanID", wireType) + } + m.SpanID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SpanID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentID", wireType) + } + m.ParentID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ParentID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + m.Error = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Error |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Meta == nil { + m.Meta = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, 
err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Meta[mapkey] = mapvalue + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metrics == nil { + m.Metrics = make(map[string]float64) + } + var mapkey string + var mapvalue float64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapvaluetemp uint64 + if (iNdEx + 8) 
> l { + return io.ErrUnexpectedEOF + } + mapvaluetemp = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + mapvalue = math.Float64frombits(mapvaluetemp) + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metrics[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetaStruct", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetaStruct == nil { + m.MetaStruct = make(map[string][]byte) + } + var mapkey string + var mapvalue []byte + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLength + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLength + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetaStruct[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/pkg/trace/pb/trace.go b/pkg/trace/pb/trace.go index 34ee6cb1e48ea..aa10697965d7d 100644 --- a/pkg/trace/pb/trace.go +++ b/pkg/trace/pb/trace.go @@ -33,7 +33,18 @@ func (p *TracerPayload) Cut(i int) *TracerPayload { if i > len(p.Chunks) { i = 
len(p.Chunks) } - new := *p + new := TracerPayload{ + ContainerID: p.GetContainerID(), + LanguageName: p.GetLanguageName(), + LanguageVersion: p.GetLanguageVersion(), + TracerVersion: p.GetTracerVersion(), + RuntimeID: p.GetRuntimeID(), + Env: p.GetEnv(), + Hostname: p.GetHostname(), + AppVersion: p.GetAppVersion(), + Tags: p.GetTags(), + } + new.Chunks = p.Chunks[:i] p.Chunks = p.Chunks[i:] diff --git a/pkg/trace/pb/tracer_payload.pb.go b/pkg/trace/pb/tracer_payload.pb.go index 2c70e730f16e8..2f3bc339e0beb 100644 --- a/pkg/trace/pb/tracer_payload.pb.go +++ b/pkg/trace/pb/tracer_payload.pb.go @@ -1,1424 +1,388 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 // source: tracer_payload.proto package pb import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a trace. 
type TraceChunk struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // priority specifies sampling priority of the trace. + // @gotags: json:"priority" msg:"priority" Priority int32 `protobuf:"varint,1,opt,name=priority,proto3" json:"priority" msg:"priority"` // origin specifies origin product ("lambda", "rum", etc.) of the trace. + // @gotags: json:"origin" msg:"origin" Origin string `protobuf:"bytes,2,opt,name=origin,proto3" json:"origin" msg:"origin"` // spans specifies list of containing spans. + // @gotags: json:"spans" msg:"spans" Spans []*Span `protobuf:"bytes,3,rep,name=spans,proto3" json:"spans" msg:"spans"` // tags specifies tags common in all `spans`. - Tags map[string]string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags" msg:"tags" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // @gotags: json:"tags" msg:"tags" + Tags map[string]string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"tags"` // droppedTrace specifies whether the trace was dropped by samplers or not. 
+ // @gotags: json:"dropped_trace" msg:"dropped_trace" DroppedTrace bool `protobuf:"varint,5,opt,name=droppedTrace,proto3" json:"dropped_trace" msg:"dropped_trace"` } -func (m *TraceChunk) Reset() { *m = TraceChunk{} } -func (m *TraceChunk) String() string { return proto.CompactTextString(m) } -func (*TraceChunk) ProtoMessage() {} -func (*TraceChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_f02adc68a2cbcd51, []int{0} +func (x *TraceChunk) Reset() { + *x = TraceChunk{} + if protoimpl.UnsafeEnabled { + mi := &file_tracer_payload_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *TraceChunk) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + +func (x *TraceChunk) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TraceChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TraceChunk.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err + +func (*TraceChunk) ProtoMessage() {} + +func (x *TraceChunk) ProtoReflect() protoreflect.Message { + mi := &file_tracer_payload_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *TraceChunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_TraceChunk.Merge(m, src) -} -func (m *TraceChunk) XXX_Size() int { - return m.Size() -} -func (m *TraceChunk) XXX_DiscardUnknown() { - xxx_messageInfo_TraceChunk.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_TraceChunk proto.InternalMessageInfo +// Deprecated: Use TraceChunk.ProtoReflect.Descriptor instead. 
+func (*TraceChunk) Descriptor() ([]byte, []int) { + return file_tracer_payload_proto_rawDescGZIP(), []int{0} +} -func (m *TraceChunk) GetPriority() int32 { - if m != nil { - return m.Priority +func (x *TraceChunk) GetPriority() int32 { + if x != nil { + return x.Priority } return 0 } -func (m *TraceChunk) GetOrigin() string { - if m != nil { - return m.Origin +func (x *TraceChunk) GetOrigin() string { + if x != nil { + return x.Origin } return "" } -func (m *TraceChunk) GetSpans() []*Span { - if m != nil { - return m.Spans +func (x *TraceChunk) GetSpans() []*Span { + if x != nil { + return x.Spans } return nil } -func (m *TraceChunk) GetTags() map[string]string { - if m != nil { - return m.Tags +func (x *TraceChunk) GetTags() map[string]string { + if x != nil { + return x.Tags } return nil } -func (m *TraceChunk) GetDroppedTrace() bool { - if m != nil { - return m.DroppedTrace +func (x *TraceChunk) GetDroppedTrace() bool { + if x != nil { + return x.DroppedTrace } return false } // TracerPayload represents a payload the trace agent receives from tracers. type TracerPayload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // containerID specifies the ID of the container where the tracer is running on. + // @gotags: json:"container_id" msg:"container_id" ContainerID string `protobuf:"bytes,1,opt,name=containerID,proto3" json:"container_id" msg:"container_id"` // languageName specifies language of the tracer. + // @gotags: json:"language_name" msg:"language_name" LanguageName string `protobuf:"bytes,2,opt,name=languageName,proto3" json:"language_name" msg:"language_name"` // languageVersion specifies language version of the tracer. + // @gotags: json:"language_version" msg:"language_version" LanguageVersion string `protobuf:"bytes,3,opt,name=languageVersion,proto3" json:"language_version" msg:"language_version"` // tracerVersion specifies version of the tracer. 
+ // @gotags: json:"tracer_version" msg:"tracer_version" TracerVersion string `protobuf:"bytes,4,opt,name=tracerVersion,proto3" json:"tracer_version" msg:"tracer_version"` // runtimeID specifies V4 UUID representation of a tracer session. + // @gotags: json:"runtime_id" msg:"runtime_id" RuntimeID string `protobuf:"bytes,5,opt,name=runtimeID,proto3" json:"runtime_id" msg:"runtime_id"` // chunks specifies list of containing trace chunks. + // @gotags: json:"chunks" msg:"chunks" Chunks []*TraceChunk `protobuf:"bytes,6,rep,name=chunks,proto3" json:"chunks" msg:"chunks"` // tags specifies tags common in all `chunks`. - Tags map[string]string `protobuf:"bytes,7,rep,name=tags,proto3" json:"tags" msg:"tags" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // @gotags: json:"tags" msg:"tags" + Tags map[string]string `protobuf:"bytes,7,rep,name=tags,proto3" json:"tags" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"tags"` // env specifies `env` tag that set with the tracer. + // @gotags: json:"env" msg:"env" Env string `protobuf:"bytes,8,opt,name=env,proto3" json:"env" msg:"env"` // hostname specifies hostname of where the tracer is running. + // @gotags: json:"hostname" msg:"hostname" Hostname string `protobuf:"bytes,9,opt,name=hostname,proto3" json:"hostname" msg:"hostname"` // version specifies `version` tag that set with the tracer. 
+ // @gotags: json:"app_version" msg:"app_version" AppVersion string `protobuf:"bytes,10,opt,name=appVersion,proto3" json:"app_version" msg:"app_version"` } -func (m *TracerPayload) Reset() { *m = TracerPayload{} } -func (m *TracerPayload) String() string { return proto.CompactTextString(m) } -func (*TracerPayload) ProtoMessage() {} -func (*TracerPayload) Descriptor() ([]byte, []int) { - return fileDescriptor_f02adc68a2cbcd51, []int{1} +func (x *TracerPayload) Reset() { + *x = TracerPayload{} + if protoimpl.UnsafeEnabled { + mi := &file_tracer_payload_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *TracerPayload) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + +func (x *TracerPayload) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TracerPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TracerPayload.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err + +func (*TracerPayload) ProtoMessage() {} + +func (x *TracerPayload) ProtoReflect() protoreflect.Message { + mi := &file_tracer_payload_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *TracerPayload) XXX_Merge(src proto.Message) { - xxx_messageInfo_TracerPayload.Merge(m, src) -} -func (m *TracerPayload) XXX_Size() int { - return m.Size() -} -func (m *TracerPayload) XXX_DiscardUnknown() { - xxx_messageInfo_TracerPayload.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_TracerPayload proto.InternalMessageInfo +// Deprecated: Use TracerPayload.ProtoReflect.Descriptor instead. 
+func (*TracerPayload) Descriptor() ([]byte, []int) { + return file_tracer_payload_proto_rawDescGZIP(), []int{1} +} -func (m *TracerPayload) GetContainerID() string { - if m != nil { - return m.ContainerID +func (x *TracerPayload) GetContainerID() string { + if x != nil { + return x.ContainerID } return "" } -func (m *TracerPayload) GetLanguageName() string { - if m != nil { - return m.LanguageName +func (x *TracerPayload) GetLanguageName() string { + if x != nil { + return x.LanguageName } return "" } -func (m *TracerPayload) GetLanguageVersion() string { - if m != nil { - return m.LanguageVersion +func (x *TracerPayload) GetLanguageVersion() string { + if x != nil { + return x.LanguageVersion } return "" } -func (m *TracerPayload) GetTracerVersion() string { - if m != nil { - return m.TracerVersion +func (x *TracerPayload) GetTracerVersion() string { + if x != nil { + return x.TracerVersion } return "" } -func (m *TracerPayload) GetRuntimeID() string { - if m != nil { - return m.RuntimeID +func (x *TracerPayload) GetRuntimeID() string { + if x != nil { + return x.RuntimeID } return "" } -func (m *TracerPayload) GetChunks() []*TraceChunk { - if m != nil { - return m.Chunks +func (x *TracerPayload) GetChunks() []*TraceChunk { + if x != nil { + return x.Chunks } return nil } -func (m *TracerPayload) GetTags() map[string]string { - if m != nil { - return m.Tags +func (x *TracerPayload) GetTags() map[string]string { + if x != nil { + return x.Tags } return nil } -func (m *TracerPayload) GetEnv() string { - if m != nil { - return m.Env +func (x *TracerPayload) GetEnv() string { + if x != nil { + return x.Env } return "" } -func (m *TracerPayload) GetHostname() string { - if m != nil { - return m.Hostname +func (x *TracerPayload) GetHostname() string { + if x != nil { + return x.Hostname } return "" } -func (m *TracerPayload) GetAppVersion() string { - if m != nil { - return m.AppVersion +func (x *TracerPayload) GetAppVersion() string { + if x != nil { + return 
x.AppVersion } return "" } -func init() { - proto.RegisterType((*TraceChunk)(nil), "pb.TraceChunk") - proto.RegisterMapType((map[string]string)(nil), "pb.TraceChunk.TagsEntry") - proto.RegisterType((*TracerPayload)(nil), "pb.TracerPayload") - proto.RegisterMapType((map[string]string)(nil), "pb.TracerPayload.TagsEntry") -} - -func init() { proto.RegisterFile("tracer_payload.proto", fileDescriptor_f02adc68a2cbcd51) } - -var fileDescriptor_f02adc68a2cbcd51 = []byte{ - // 638 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x6b, 0xdb, 0x3e, - 0x18, 0xc7, 0xeb, 0xa6, 0xc9, 0x2f, 0x56, 0xda, 0xfe, 0x3a, 0x2d, 0x14, 0x93, 0x81, 0x15, 0xc4, - 0x28, 0x61, 0xb0, 0x14, 0xba, 0xc3, 0x4a, 0x4f, 0xc3, 0xcb, 0xd8, 0x0a, 0x63, 0x14, 0xad, 0x8c, - 0xdd, 0x82, 0x92, 0x78, 0xae, 0x69, 0x23, 0x0b, 0xd9, 0x0e, 0xf4, 0x3c, 0x76, 0xdf, 0xcb, 0xda, - 0xb1, 0xc7, 0x9d, 0xc4, 0x68, 0x6f, 0x3e, 0xfa, 0x15, 0x0c, 0x3f, 0xb2, 0x9d, 0x78, 0xa7, 0xb1, - 0x9b, 0x9e, 0xcf, 0xa3, 0xef, 0x37, 0x7e, 0xfe, 0x28, 0xa8, 0x9f, 0x28, 0x3e, 0xf7, 0xd5, 0x54, - 0xf2, 0xdb, 0x9b, 0x88, 0x2f, 0xc6, 0x52, 0x45, 0x49, 0x84, 0xb7, 0xe5, 0x6c, 0xf0, 0x3c, 0x08, - 0x93, 0xab, 0x74, 0x36, 0x9e, 0x47, 0xcb, 0xe3, 0x20, 0x0a, 0xa2, 0x63, 0x48, 0xcd, 0xd2, 0x2f, - 0x10, 0x41, 0x00, 0x27, 0x23, 0x19, 0xa0, 0x58, 0x72, 0x61, 0xce, 0xf4, 0x6b, 0x0b, 0xa1, 0xcb, - 0xc2, 0xf7, 0xf5, 0x55, 0x2a, 0xae, 0xf1, 0x19, 0xea, 0x4a, 0x15, 0x46, 0x2a, 0x4c, 0x6e, 0x1d, - 0x6b, 0x68, 0x8d, 0xda, 0x9e, 0x9b, 0x69, 0x52, 0xb3, 0x5c, 0x93, 0xfd, 0x65, 0x1c, 0x9c, 0xd1, - 0x0a, 0x50, 0x56, 0xe7, 0xf0, 0x09, 0xea, 0x44, 0x2a, 0x0c, 0x42, 0xe1, 0x6c, 0x0f, 0xad, 0x91, - 0xed, 0x0d, 0x32, 0x4d, 0x4a, 0x92, 0x6b, 0xb2, 0x0b, 0x3a, 0x13, 0x52, 0x56, 0x72, 0x7c, 0x8a, - 0xda, 0xc5, 0xc7, 0xc4, 0x4e, 0x6b, 0xd8, 0x1a, 0xf5, 0x4e, 0xba, 0x63, 0x39, 0x1b, 0x7f, 0x94, - 0x5c, 0x78, 0x4e, 0xa6, 0x89, 0x49, 0xe5, 0x9a, 0xf4, 0x40, 0x0b, 0x11, 0x65, 0x86, 0xe2, 0x09, - 0xda, 0x49, 
0x78, 0x10, 0x3b, 0x3b, 0x20, 0x74, 0x0a, 0xe1, 0xba, 0x8e, 0xf1, 0x25, 0x0f, 0xe2, - 0x37, 0x22, 0x51, 0xb7, 0xde, 0x61, 0xa6, 0x09, 0xdc, 0xcc, 0x35, 0x41, 0xe0, 0x53, 0x04, 0x94, - 0x01, 0xc3, 0xef, 0xd1, 0xee, 0x42, 0x45, 0x52, 0xfa, 0x0b, 0x10, 0x3b, 0xed, 0xa1, 0x35, 0xea, - 0x7a, 0xa3, 0x4c, 0x93, 0xbd, 0x92, 0x4f, 0xa1, 0xeb, 0xb9, 0x26, 0x8f, 0x41, 0xdc, 0xa0, 0x94, - 0x35, 0xd4, 0x83, 0x97, 0xc8, 0xae, 0x7f, 0x18, 0x1f, 0xa0, 0xd6, 0xb5, 0x6f, 0xba, 0x68, 0xb3, - 0xe2, 0x88, 0xfb, 0xa8, 0xbd, 0xe2, 0x37, 0xa9, 0x6f, 0xfa, 0xc3, 0x4c, 0x70, 0xb6, 0x7d, 0x6a, - 0xd1, 0x6f, 0x1d, 0xb4, 0x07, 0x16, 0xea, 0xc2, 0x0c, 0x17, 0xbf, 0x43, 0xbd, 0x79, 0x24, 0x12, - 0x1e, 0x0a, 0x5f, 0x9d, 0x4f, 0x8c, 0x8b, 0x77, 0x94, 0x69, 0xb2, 0x5b, 0xe3, 0x69, 0xb8, 0xc8, - 0x35, 0xc1, 0xf0, 0x59, 0x9b, 0x90, 0xb2, 0x4d, 0x69, 0x51, 0xe2, 0x0d, 0x17, 0x41, 0xca, 0x03, - 0xff, 0x03, 0x5f, 0x96, 0x3f, 0x6e, 0x4a, 0xac, 0xf8, 0x54, 0xf0, 0xe5, 0xba, 0xc4, 0x06, 0xa5, - 0xac, 0xa1, 0xc6, 0x9f, 0xd1, 0xff, 0x55, 0xfc, 0xc9, 0x57, 0x71, 0x18, 0x09, 0xa7, 0x05, 0x86, - 0xe3, 0x4c, 0x93, 0x83, 0x5a, 0xba, 0x32, 0xb9, 0x5c, 0x93, 0xc3, 0xa6, 0x67, 0x99, 0xa0, 0xec, - 0x4f, 0x1b, 0x7c, 0x81, 0xf6, 0xcc, 0x82, 0x57, 0xbe, 0x3b, 0xe0, 0xfb, 0x2c, 0xd3, 0x64, 0xbf, - 0xdc, 0xfc, 0xb5, 0x6b, 0xdf, 0x4c, 0xb2, 0x81, 0x29, 0x6b, 0x1a, 0xe0, 0x57, 0xc8, 0x56, 0xa9, - 0x48, 0xc2, 0xa5, 0x7f, 0x3e, 0x81, 0xc9, 0xda, 0x1e, 0xcd, 0x34, 0x41, 0x25, 0x34, 0xfd, 0x3b, - 0x00, 0xa7, 0x35, 0xa2, 0x6c, 0x2d, 0xc2, 0x1e, 0xea, 0xcc, 0x8b, 0x7d, 0x8a, 0x9d, 0x0e, 0xac, - 0xd9, 0x7e, 0x73, 0xcd, 0xcc, 0x8a, 0x9b, 0x1b, 0xf5, 0x8a, 0x9b, 0x90, 0xb2, 0x92, 0xe3, 0xb7, - 0xe5, 0xa2, 0xfe, 0x07, 0x0e, 0x4f, 0x6a, 0x87, 0x6a, 0xd4, 0x7f, 0xbd, 0xab, 0x47, 0xa8, 0xe5, - 0x8b, 0x95, 0xd3, 0x85, 0x42, 0xfa, 0x99, 0x26, 0x45, 0x98, 0x6b, 0x62, 0xc3, 0x4d, 0x5f, 0xac, - 0x28, 0x2b, 0x48, 0xf1, 0x86, 0xaf, 0xa2, 0x38, 0x29, 0xa6, 0xe7, 0xd8, 0x70, 0x19, 0xde, 0x70, - 0xc5, 0xea, 0x37, 0x5c, 0x01, 0xca, 0xea, 0x1c, 
0x9e, 0x20, 0xc4, 0xa5, 0xac, 0x26, 0x80, 0x40, - 0xfd, 0x34, 0xd3, 0xa4, 0xc7, 0xa5, 0xdc, 0x68, 0xff, 0x23, 0x30, 0xd8, 0x60, 0x94, 0x6d, 0xe8, - 0xfe, 0xf9, 0x1d, 0x78, 0xce, 0x8f, 0x7b, 0xd7, 0xba, 0xbb, 0x77, 0xad, 0x5f, 0xf7, 0xae, 0xf5, - 0xfd, 0xc1, 0xdd, 0xba, 0x7b, 0x70, 0xb7, 0x7e, 0x3e, 0xb8, 0x5b, 0xb3, 0x0e, 0xfc, 0x5d, 0xbd, - 0xf8, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x17, 0x87, 0x13, 0xf6, 0x05, 0x05, 0x00, 0x00, -} - -func (m *TraceChunk) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TraceChunk) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Priority != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(m.Priority)) - } - if len(m.Origin) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(len(m.Origin))) - i += copy(dAtA[i:], m.Origin) - } - if len(m.Spans) > 0 { - for _, msg := range m.Spans { - dAtA[i] = 0x1a - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Tags) > 0 { - for k, _ := range m.Tags { - dAtA[i] = 0x22 - i++ - v := m.Tags[k] - mapSize := 1 + len(k) + sovTracerPayload(uint64(len(k))) + 1 + len(v) + sovTracerPayload(uint64(len(v))) - i = encodeVarintTracerPayload(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.DroppedTrace { - dAtA[i] = 0x28 - i++ - if m.DroppedTrace { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *TracerPayload) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - 
return nil, err - } - return dAtA[:n], nil -} - -func (m *TracerPayload) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ContainerID) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(len(m.ContainerID))) - i += copy(dAtA[i:], m.ContainerID) - } - if len(m.LanguageName) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(len(m.LanguageName))) - i += copy(dAtA[i:], m.LanguageName) - } - if len(m.LanguageVersion) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(len(m.LanguageVersion))) - i += copy(dAtA[i:], m.LanguageVersion) - } - if len(m.TracerVersion) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(len(m.TracerVersion))) - i += copy(dAtA[i:], m.TracerVersion) - } - if len(m.RuntimeID) > 0 { - dAtA[i] = 0x2a - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(len(m.RuntimeID))) - i += copy(dAtA[i:], m.RuntimeID) - } - if len(m.Chunks) > 0 { - for _, msg := range m.Chunks { - dAtA[i] = 0x32 - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Tags) > 0 { - for k, _ := range m.Tags { - dAtA[i] = 0x3a - i++ - v := m.Tags[k] - mapSize := 1 + len(k) + sovTracerPayload(uint64(len(k))) + 1 + len(v) + sovTracerPayload(uint64(len(v))) - i = encodeVarintTracerPayload(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.Env) > 0 { - dAtA[i] = 0x42 - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(len(m.Env))) - i += copy(dAtA[i:], m.Env) - } - if len(m.Hostname) > 0 { - dAtA[i] = 0x4a - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - } - if 
len(m.AppVersion) > 0 { - dAtA[i] = 0x52 - i++ - i = encodeVarintTracerPayload(dAtA, i, uint64(len(m.AppVersion))) - i += copy(dAtA[i:], m.AppVersion) - } - return i, nil -} - -func encodeVarintTracerPayload(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *TraceChunk) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Priority != 0 { - n += 1 + sovTracerPayload(uint64(m.Priority)) - } - l = len(m.Origin) - if l > 0 { - n += 1 + l + sovTracerPayload(uint64(l)) - } - if len(m.Spans) > 0 { - for _, e := range m.Spans { - l = e.Size() - n += 1 + l + sovTracerPayload(uint64(l)) - } - } - if len(m.Tags) > 0 { - for k, v := range m.Tags { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovTracerPayload(uint64(len(k))) + 1 + len(v) + sovTracerPayload(uint64(len(v))) - n += mapEntrySize + 1 + sovTracerPayload(uint64(mapEntrySize)) - } - } - if m.DroppedTrace { - n += 2 - } - return n -} - -func (m *TracerPayload) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTracerPayload(uint64(l)) - } - l = len(m.LanguageName) - if l > 0 { - n += 1 + l + sovTracerPayload(uint64(l)) - } - l = len(m.LanguageVersion) - if l > 0 { - n += 1 + l + sovTracerPayload(uint64(l)) - } - l = len(m.TracerVersion) - if l > 0 { - n += 1 + l + sovTracerPayload(uint64(l)) - } - l = len(m.RuntimeID) - if l > 0 { - n += 1 + l + sovTracerPayload(uint64(l)) - } - if len(m.Chunks) > 0 { - for _, e := range m.Chunks { - l = e.Size() - n += 1 + l + sovTracerPayload(uint64(l)) - } - } - if len(m.Tags) > 0 { - for k, v := range m.Tags { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovTracerPayload(uint64(len(k))) + 1 + len(v) + sovTracerPayload(uint64(len(v))) - n += mapEntrySize + 1 + sovTracerPayload(uint64(mapEntrySize)) - } - } - l = len(m.Env) - if l > 0 { - n += 1 + l + 
sovTracerPayload(uint64(l)) - } - l = len(m.Hostname) - if l > 0 { - n += 1 + l + sovTracerPayload(uint64(l)) - } - l = len(m.AppVersion) - if l > 0 { - n += 1 + l + sovTracerPayload(uint64(l)) - } - return n +var File_tracer_payload_proto protoreflect.FileDescriptor + +var file_tracer_payload_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x0a, 0x73, 0x70, 0x61, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xeb, 0x01, 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, + 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x1e, 0x0a, 0x05, 0x73, 0x70, 0x61, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x70, + 0x61, 0x6e, 0x52, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x2c, 0x0a, 0x04, 0x74, 0x61, 0x67, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, + 0x63, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x64, 0x72, 0x6f, 0x70, 0x70, + 0x65, 0x64, 0x54, 0x72, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x54, 0x72, 0x61, 0x63, 0x65, 0x1a, 0x37, 0x0a, 0x09, 0x54, + 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 
0x38, 0x01, 0x22, 0xa3, 0x03, 0x0a, 0x0d, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x6c, 0x61, 0x6e, 0x67, + 0x75, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, + 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x06, 0x63, 0x68, + 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, + 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, + 0x6b, 0x73, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, + 0x6f, 0x61, 0x64, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x44, 0x6f, 0x67, + 0x2f, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x70, + 0x6b, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } -func sovTracerPayload(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozTracerPayload(x uint64) (n int) { - return sovTracerPayload(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *TraceChunk) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TraceChunk: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TraceChunk: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) - } - m.Priority = 0 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Priority |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Origin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTracerPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTracerPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Origin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTracerPayload - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTracerPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Spans = append(m.Spans, &Span{}) - if err := m.Spans[len(m.Spans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen 
|= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTracerPayload - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTracerPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tags == nil { - m.Tags = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthTracerPayload - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthTracerPayload - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthTracerPayload - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthTracerPayload - } - if 
postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipTracerPayload(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTracerPayload - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Tags[mapkey] = mapvalue - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedTrace", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DroppedTrace = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipTracerPayload(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTracerPayload - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTracerPayload - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TracerPayload) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TracerPayload: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TracerPayload: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTracerPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTracerPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LanguageName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTracerPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTracerPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LanguageName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LanguageVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTracerPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTracerPayload - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.LanguageVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TracerVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTracerPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTracerPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TracerVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTracerPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTracerPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RuntimeID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTracerPayload - } - postIndex := iNdEx 
+ msglen - if postIndex < 0 { - return ErrInvalidLengthTracerPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Chunks = append(m.Chunks, &TraceChunk{}) - if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTracerPayload - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTracerPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tags == nil { - m.Tags = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthTracerPayload - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthTracerPayload - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - 
iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthTracerPayload - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthTracerPayload - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipTracerPayload(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTracerPayload - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Tags[mapkey] = mapvalue - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTracerPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTracerPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTracerPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTracerPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hostname = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTracerPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTracerPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AppVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTracerPayload(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTracerPayload - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTracerPayload - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } +var ( + file_tracer_payload_proto_rawDescOnce sync.Once + file_tracer_payload_proto_rawDescData = file_tracer_payload_proto_rawDesc +) - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTracerPayload(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTracerPayload - } - if iNdEx >= l { - 
return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break +func file_tracer_payload_proto_rawDescGZIP() []byte { + file_tracer_payload_proto_rawDescOnce.Do(func() { + file_tracer_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_tracer_payload_proto_rawDescData) + }) + return file_tracer_payload_proto_rawDescData +} + +var file_tracer_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_tracer_payload_proto_goTypes = []interface{}{ + (*TraceChunk)(nil), // 0: pb.TraceChunk + (*TracerPayload)(nil), // 1: pb.TracerPayload + nil, // 2: pb.TraceChunk.TagsEntry + nil, // 3: pb.TracerPayload.TagsEntry + (*Span)(nil), // 4: pb.Span +} +var file_tracer_payload_proto_depIdxs = []int32{ + 4, // 0: pb.TraceChunk.spans:type_name -> pb.Span + 2, // 1: pb.TraceChunk.tags:type_name -> pb.TraceChunk.TagsEntry + 0, // 2: pb.TracerPayload.chunks:type_name -> pb.TraceChunk + 3, // 3: pb.TracerPayload.tags:type_name -> pb.TracerPayload.TagsEntry + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_tracer_payload_proto_init() } +func file_tracer_payload_proto_init() { + if File_tracer_payload_proto != nil { + return + } + file_span_proto_init() + if !protoimpl.UnsafeEnabled { + file_tracer_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceChunk); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 
0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + file_tracer_payload_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TracerPayload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - if length < 0 { - return 0, ErrInvalidLengthTracerPayload - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthTracerPayload - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTracerPayload - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipTracerPayload(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthTracerPayload - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } - panic("unreachable") + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_tracer_payload_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_tracer_payload_proto_goTypes, + DependencyIndexes: file_tracer_payload_proto_depIdxs, + MessageInfos: file_tracer_payload_proto_msgTypes, + }.Build() + 
File_tracer_payload_proto = out.File + file_tracer_payload_proto_rawDesc = nil + file_tracer_payload_proto_goTypes = nil + file_tracer_payload_proto_depIdxs = nil } - -var ( - ErrInvalidLengthTracerPayload = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTracerPayload = fmt.Errorf("proto: integer overflow") -) diff --git a/pkg/trace/pb/tracer_payload.proto b/pkg/trace/pb/tracer_payload.proto index 7e61412699156..6b391b6c19959 100644 --- a/pkg/trace/pb/tracer_payload.proto +++ b/pkg/trace/pb/tracer_payload.proto @@ -1,44 +1,58 @@ syntax = "proto3"; package pb; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option go_package="github.com/DataDog/datadog-agent/pkg/trace/pb"; import "span.proto"; // TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a trace. message TraceChunk { // priority specifies sampling priority of the trace. - int32 priority = 1 [(gogoproto.jsontag) = "priority", (gogoproto.moretags) = "msg:\"priority\""]; + // @gotags: json:"priority" msg:"priority" + int32 priority = 1; // origin specifies origin product ("lambda", "rum", etc.) of the trace. - string origin = 2 [(gogoproto.jsontag) = "origin", (gogoproto.moretags) = "msg:\"origin\""]; + // @gotags: json:"origin" msg:"origin" + string origin = 2; // spans specifies list of containing spans. - repeated Span spans = 3 [(gogoproto.jsontag) = "spans", (gogoproto.moretags) = "msg:\"spans\""]; + // @gotags: json:"spans" msg:"spans" + repeated Span spans = 3; // tags specifies tags common in all `spans`. - map tags = 4 [(gogoproto.jsontag) = "tags", (gogoproto.moretags) = "msg:\"tags\""]; + // @gotags: json:"tags" msg:"tags" + map tags = 4; // droppedTrace specifies whether the trace was dropped by samplers or not. 
- bool droppedTrace = 5 [(gogoproto.jsontag) = "dropped_trace", (gogoproto.moretags) = "msg:\"dropped_trace\""]; + // @gotags: json:"dropped_trace" msg:"dropped_trace" + bool droppedTrace = 5; } // TracerPayload represents a payload the trace agent receives from tracers. message TracerPayload { // containerID specifies the ID of the container where the tracer is running on. - string containerID = 1 [(gogoproto.jsontag) = "container_id", (gogoproto.moretags) = "msg:\"container_id\""]; + // @gotags: json:"container_id" msg:"container_id" + string containerID = 1; // languageName specifies language of the tracer. - string languageName = 2 [(gogoproto.jsontag) = "language_name", (gogoproto.moretags) = "msg:\"language_name\""]; + // @gotags: json:"language_name" msg:"language_name" + string languageName = 2; // languageVersion specifies language version of the tracer. - string languageVersion = 3 [(gogoproto.jsontag) = "language_version", (gogoproto.moretags) = "msg:\"language_version\""]; + // @gotags: json:"language_version" msg:"language_version" + string languageVersion = 3; // tracerVersion specifies version of the tracer. - string tracerVersion = 4 [(gogoproto.jsontag) = "tracer_version", (gogoproto.moretags) = "msg:\"tracer_version\""]; + // @gotags: json:"tracer_version" msg:"tracer_version" + string tracerVersion = 4; // runtimeID specifies V4 UUID representation of a tracer session. - string runtimeID = 5 [(gogoproto.jsontag) = "runtime_id", (gogoproto.moretags) = "msg:\"runtime_id\""]; + // @gotags: json:"runtime_id" msg:"runtime_id" + string runtimeID = 5; // chunks specifies list of containing trace chunks. - repeated TraceChunk chunks = 6 [(gogoproto.jsontag) = "chunks", (gogoproto.moretags) = "msg:\"chunks\""]; + // @gotags: json:"chunks" msg:"chunks" + repeated TraceChunk chunks = 6; // tags specifies tags common in all `chunks`. 
- map tags = 7 [(gogoproto.jsontag) = "tags", (gogoproto.moretags) = "msg:\"tags\""]; + // @gotags: json:"tags" msg:"tags" + map tags = 7; // env specifies `env` tag that set with the tracer. - string env = 8 [(gogoproto.jsontag) = "env", (gogoproto.moretags) = "msg:\"env\""]; + // @gotags: json:"env" msg:"env" + string env = 8; // hostname specifies hostname of where the tracer is running. - string hostname = 9 [(gogoproto.jsontag) = "hostname", (gogoproto.moretags) = "msg:\"hostname\""]; + // @gotags: json:"hostname" msg:"hostname" + string hostname = 9; // version specifies `version` tag that set with the tracer. - string appVersion = 10 [(gogoproto.jsontag) = "app_version", (gogoproto.moretags) = "msg:\"app_version\""]; + // @gotags: json:"app_version" msg:"app_version" + string appVersion = 10; } diff --git a/pkg/trace/pb/tracer_payload_gen.go b/pkg/trace/pb/tracer_payload_gen.go index 36179e47ee704..d45264b771768 100644 --- a/pkg/trace/pb/tracer_payload_gen.go +++ b/pkg/trace/pb/tracer_payload_gen.go @@ -8,7 +8,7 @@ package pb // Code generated by github.com/tinylib/msgp DO NOT EDIT. import ( - _ "github.com/gogo/protobuf/gogoproto" + // _ "github.com/gogo/protobuf/gogoproto" "github.com/tinylib/msgp/msgp" ) diff --git a/pkg/trace/pb/tracer_payload_utils.go b/pkg/trace/pb/tracer_payload_utils.go new file mode 100644 index 0000000000000..04b9d76f1d13b --- /dev/null +++ b/pkg/trace/pb/tracer_payload_utils.go @@ -0,0 +1,35 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package pb + +// traceChunkCopiedFields records the fields that are copied in ShallowCopy. +// This should match exactly the fields set in (*TraceChunk).ShallowCopy. +// This is used by tests to enforce the correctness of ShallowCopy. 
+var traceChunkCopiedFields = map[string]struct{}{ + "Priority": {}, + "Origin": {}, + "Spans": {}, + "Tags": {}, + "DroppedTrace": {}, +} + +// ShallowCopy returns a shallow copy of the copy-able portion of a TraceChunk. These are the +// public fields which will have a Get* method for them. The completeness of this +// method is enforced by the init function above. Instead of using pkg/proto/utils.ProtoCopier, +// which incurs heavy reflection cost for every copy at runtime, we use reflection once at +// startup to ensure our method is complete. +func (t *TraceChunk) ShallowCopy() *TraceChunk { + if t == nil { + return nil + } + return &TraceChunk{ + Priority: t.Priority, + Origin: t.Origin, + Spans: t.Spans, + Tags: t.Tags, + DroppedTrace: t.DroppedTrace, + } +} diff --git a/pkg/trace/pb/tracer_payload_vtproto.pb.go b/pkg/trace/pb/tracer_payload_vtproto.pb.go new file mode 100644 index 0000000000000..6528e324b432e --- /dev/null +++ b/pkg/trace/pb/tracer_payload_vtproto.pb.go @@ -0,0 +1,1066 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.4.0 +// source: tracer_payload.proto + +package pb + +import ( + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TraceChunk) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraceChunk) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TraceChunk) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DroppedTrace { + i-- + if m.DroppedTrace { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.Tags) > 0 { + for k := range m.Tags { + v := m.Tags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Spans) > 0 { + for iNdEx := len(m.Spans) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Spans[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Origin) > 0 { + i -= len(m.Origin) + copy(dAtA[i:], m.Origin) + i = encodeVarint(dAtA, i, uint64(len(m.Origin))) + i-- + dAtA[i] = 0x12 + } + if m.Priority != 0 { + i = encodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TracerPayload) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil 
+} + +func (m *TracerPayload) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TracerPayload) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AppVersion) > 0 { + i -= len(m.AppVersion) + copy(dAtA[i:], m.AppVersion) + i = encodeVarint(dAtA, i, uint64(len(m.AppVersion))) + i-- + dAtA[i] = 0x52 + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x4a + } + if len(m.Env) > 0 { + i -= len(m.Env) + copy(dAtA[i:], m.Env) + i = encodeVarint(dAtA, i, uint64(len(m.Env))) + i-- + dAtA[i] = 0x42 + } + if len(m.Tags) > 0 { + for k := range m.Tags { + v := m.Tags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Chunks) > 0 { + for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Chunks[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.RuntimeID) > 0 { + i -= len(m.RuntimeID) + copy(dAtA[i:], m.RuntimeID) + i = encodeVarint(dAtA, i, uint64(len(m.RuntimeID))) + i-- + dAtA[i] = 0x2a + } + if len(m.TracerVersion) > 0 { + i -= len(m.TracerVersion) + copy(dAtA[i:], m.TracerVersion) + i = encodeVarint(dAtA, i, uint64(len(m.TracerVersion))) + i-- + dAtA[i] = 0x22 + } + if len(m.LanguageVersion) > 0 { + i -= len(m.LanguageVersion) + copy(dAtA[i:], m.LanguageVersion) + i = encodeVarint(dAtA, i, uint64(len(m.LanguageVersion))) + i-- + 
dAtA[i] = 0x1a + } + if len(m.LanguageName) > 0 { + i -= len(m.LanguageName) + copy(dAtA[i:], m.LanguageName) + i = encodeVarint(dAtA, i, uint64(len(m.LanguageName))) + i-- + dAtA[i] = 0x12 + } + if len(m.ContainerID) > 0 { + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarint(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TraceChunk) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Priority != 0 { + n += 1 + sov(uint64(m.Priority)) + } + l = len(m.Origin) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Spans) > 0 { + for _, e := range m.Spans { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Tags) > 0 { + for k, v := range m.Tags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + if m.DroppedTrace { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *TracerPayload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.LanguageName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.LanguageVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TracerVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.RuntimeID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Chunks) > 0 { + for _, e := range m.Chunks { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Tags) > 0 { + for k, v := range m.Tags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + l = len(m.Env) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.AppVersion) + if l > 0 { + n += 1 + l + 
sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TraceChunk) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TraceChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TraceChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + m.Priority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Priority |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Origin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Origin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Spans = append(m.Spans, &Span{}) + if err := m.Spans[len(m.Spans)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tags == nil { + m.Tags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return 
io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Tags[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedTrace", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DroppedTrace = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TracerPayload) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TracerPayload: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TracerPayload: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LanguageName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.LanguageName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LanguageVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LanguageVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TracerVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TracerVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunks = append(m.Chunks, &TraceChunk{}) + if err := m.Chunks[len(m.Chunks)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tags == nil { + m.Tags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Tags[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/pkg/trace/pb/utils_test.go b/pkg/trace/pb/utils_test.go new file mode 100644 index 0000000000000..43afe155a09f6 --- /dev/null +++ b/pkg/trace/pb/utils_test.go @@ -0,0 +1,56 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package pb + +import ( + fmt "fmt" + reflect "reflect" + "testing" +) + +func TestShallowCopy(t *testing.T) { + // These tests ensure that the ShallowCopy functions for Span and TraceChunk + // copy all of the available fields. + t.Run("span", func(t *testing.T) { + typ := reflect.TypeOf(&Span{}) + for i := 0; i < typ.Elem().NumField(); i++ { + field := typ.Elem().Field(i) + if field.PkgPath != `` { + continue + } + method, ok := typ.MethodByName(`Get` + field.Name) + if !ok || + method.Type.NumIn() != 1 || + method.Type.NumOut() != 1 || + method.Type.Out(0) != field.Type { + continue + } + if _, ok := spanCopiedFields[field.Name]; !ok { + panic(fmt.Sprintf("pkg/trace/pb/span_utils.go: ShallowCopy needs to be updated for new Span fields. Missing: %s", field.Name)) + } + } + }) + + t.Run("trace-chunk", func(t *testing.T) { + typ := reflect.TypeOf(&TraceChunk{}) + for i := 0; i < typ.Elem().NumField(); i++ { + field := typ.Elem().Field(i) + if field.PkgPath != `` { + continue + } + method, ok := typ.MethodByName(`Get` + field.Name) + if !ok || + method.Type.NumIn() != 1 || + method.Type.NumOut() != 1 || + method.Type.Out(0) != field.Type { + continue + } + if _, ok := traceChunkCopiedFields[field.Name]; !ok { + panic(fmt.Sprintf("pkg/trace/pb/tracer_payload_utils.go: ShallowCopy needs to be updated for new TraceChunk fields. 
Missing: %s", field.Name)) + } + } + }) +} diff --git a/pkg/trace/sampler/spansampler_test.go b/pkg/trace/sampler/spansampler_test.go index 579712e6cf0f0..7fa72879ee5ec 100644 --- a/pkg/trace/sampler/spansampler_test.go +++ b/pkg/trace/sampler/spansampler_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/gogo/protobuf/proto" + "google.golang.org/protobuf/proto" "github.com/stretchr/testify/assert" diff --git a/pkg/trace/traceutil/processed_trace.go b/pkg/trace/traceutil/processed_trace.go index 97c886830805d..e3558cd02133b 100644 --- a/pkg/trace/traceutil/processed_trace.go +++ b/pkg/trace/traceutil/processed_trace.go @@ -40,13 +40,11 @@ func (pt *ProcessedTrace) Clone() *ProcessedTrace { ptClone := new(ProcessedTrace) *ptClone = *pt if pt.TraceChunk != nil { - c := new(pb.TraceChunk) - *c = *pt.TraceChunk + c := pt.TraceChunk.ShallowCopy() ptClone.TraceChunk = c } if pt.Root != nil { - r := new(pb.Span) - *r = *pt.Root + r := pt.Root.ShallowCopy() ptClone.Root = r } return ptClone diff --git a/pkg/trace/writer/trace.go b/pkg/trace/writer/trace.go index 05d9b793d2ab2..af14f5bdb8000 100644 --- a/pkg/trace/writer/trace.go +++ b/pkg/trace/writer/trace.go @@ -20,8 +20,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/trace/metrics/timing" "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" - - "github.com/gogo/protobuf/proto" ) // pathTraces is the target host API path for delivering traces. 
@@ -199,7 +197,7 @@ func (w *TraceWriter) addSpans(pkg *SampledChunks) { w.flush() } if len(pkg.TracerPayload.Chunks) > 0 { - log.Tracef("Handling new tracer payload with %d spans: %v", pkg.SpanCount, pkg.TracerPayload) + log.Tracef("Writer: handling new tracer payload with %d spans: %v", pkg.SpanCount, pkg.TracerPayload) w.tracerPayloads = append(w.tracerPayloads, pkg.TracerPayload) } w.bufferedSize += size @@ -248,7 +246,7 @@ func (w *TraceWriter) flush() { TracerPayloads: w.tracerPayloads, } log.Debugf("Reported agent rates: target_tps=%v errors_tps=%v rare_sampling=%v", p.TargetTPS, p.ErrorTPS, p.RareSamplerEnabled) - b, err := proto.Marshal(&p) + b, err := p.MarshalVT() if err != nil { log.Errorf("Failed to serialize payload, data dropped: %v", err) return diff --git a/pkg/trace/writer/trace_test.go b/pkg/trace/writer/trace_test.go index 708ac753df167..f44b7bb0d6deb 100644 --- a/pkg/trace/writer/trace_test.go +++ b/pkg/trace/writer/trace_test.go @@ -9,13 +9,12 @@ import ( "compress/gzip" "io" "io/ioutil" - "reflect" "runtime" "sync" "testing" - "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/proto" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/pb" @@ -163,7 +162,7 @@ func payloadsContain(t *testing.T, payloads []*payload, sampledSpans []*SampledC var found bool for _, tracerPayload := range all.TracerPayloads { for _, trace := range tracerPayload.Chunks { - if reflect.DeepEqual(trace, ss.TracerPayload.Chunks[0]) { + if proto.Equal(trace, ss.TracerPayload.Chunks[0]) { found = true break } @@ -412,6 +411,7 @@ func BenchmarkSpanProto(b *testing.B) { }, } for n := 0; n < b.N; n++ { - s.Marshal() + //proto.Marshal(&s) + s.MarshalVT() } } diff --git a/pkg/util/cgroups/go.mod b/pkg/util/cgroups/go.mod index ad080c73c3b11..e46316f6d2d8c 100644 --- a/pkg/util/cgroups/go.mod +++ b/pkg/util/cgroups/go.mod @@ -9,18 +9,25 @@ replace ( ) require ( - 
github.com/DataDog/datadog-agent/pkg/util/log v0.45.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/pointer v0.45.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.46.0-rc.2 + github.com/DataDog/datadog-agent/pkg/util/pointer v0.46.0-rc.2 + github.com/containerd/cgroups v1.0.4 github.com/google/go-cmp v0.5.8 github.com/karrick/godirwalk v1.17.0 github.com/stretchr/testify v1.8.1 ) require ( - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.45.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.46.0-rc.2 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/godbus/dbus/v5 v5.0.4 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/opencontainers/runtime-spec v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/sys v0.0.0-20210510120138-977fb7262007 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/util/cgroups/go.sum b/pkg/util/cgroups/go.sum index 7322f7623d310..0f91601f33bf2 100644 --- a/pkg/util/cgroups/go.sum +++ b/pkg/util/cgroups/go.sum @@ -1,12 +1,26 @@ github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= +github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI= github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -16,6 +30,35 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod 
h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= diff --git a/pkg/util/cgroups/memory_controller.go b/pkg/util/cgroups/memory_controller.go new file mode 100644 index 0000000000000..0e581089277c5 --- /dev/null +++ b/pkg/util/cgroups/memory_controller.go @@ -0,0 +1,179 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux +// +build linux + +package cgroups + +import ( + "fmt" + "syscall" + + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/containerd/cgroups" +) + +const maxEpollEvents = 4 + +// MemoryController describes a cgroup based memory controller +type MemoryController struct { + efd int + memoryEvents map[int]func() +} + +// MemoryMonitor creates a cgroup memory event +type MemoryMonitor func(cgroup cgroups.Cgroup) (cgroups.MemoryEvent, func(), error) + +// MemoryPercentageThresholdMonitor monitors memory usage above a specified percentage threshold +func MemoryPercentageThresholdMonitor(cb func(), percentage uint64, swap bool) MemoryMonitor { + return func(cgroup cgroups.Cgroup) (cgroups.MemoryEvent, func(), error) { + metrics, err := cgroup.Stat(cgroups.IgnoreNotExist) + if err != nil { + return nil, nil, fmt.Errorf("can't get cgroup metrics: %w", err) + } + + if metrics.Memory == nil || metrics.Memory.Usage == nil { + return nil, nil, fmt.Errorf("can't get cgroup memory metrics: %w", err) + } + + return cgroups.MemoryThresholdEvent(metrics.Memory.Usage.Limit*percentage/100, swap), cb, nil + } +} + +// MemoryThresholdMonitor monitors memory usage above a specified threshold +func MemoryThresholdMonitor(cb func(), limit uint64, swap bool) MemoryMonitor { + return func(cgroup cgroups.Cgroup) (cgroups.MemoryEvent, func(), error) { + return cgroups.MemoryThresholdEvent(limit, swap), cb, nil + } +} + +// MemoryPressureMonitor monitors memory pressure levels +func MemoryPressureMonitor(cb func(), level string) MemoryMonitor { + return func(cgroup cgroups.Cgroup) (cgroups.MemoryEvent, func(), error) { + return cgroups.MemoryPressureEvent(cgroups.MemoryPressureLevel(level), cgroups.LocalMode), cb, nil + } +} + +type hostSubsystem struct { + cgroups.Subsystem +} + +func hostHierarchy(hierarchy cgroups.Hierarchy) cgroups.Hierarchy { + return func() ([]cgroups.Subsystem, error) { + subsystems, err := hierarchy() + if err != nil { + return nil, err + 
} + + for i, subsystem := range subsystems { + subsystems[i] = &hostSubsystem{ + Subsystem: subsystem, + } + } + + return subsystems, nil + } +} + +// NewMemoryController creates a new systemd cgroup based memory controller +func NewMemoryController(kind string, containerized bool, monitors ...MemoryMonitor) (*MemoryController, error) { + path := cgroups.NestedPath("") + + var cgroupHierarchy cgroups.Hierarchy + switch kind { + case "systemd": + cgroupHierarchy = cgroups.Systemd + case "v1": + cgroupHierarchy = cgroups.V1 + default: + return nil, fmt.Errorf("unsupported cgroup hierarchy '%s'", kind) + } + + if containerized { + cgroupHierarchy = hostHierarchy(cgroupHierarchy) + } + + cgroup, err := cgroups.Load(cgroupHierarchy, path) + if err != nil { + return nil, fmt.Errorf("can't open memory cgroup: %w", err) + } + + epfd, err := syscall.EpollCreate1(0) + if err != nil { + return nil, err + } + + mc := &MemoryController{ + efd: epfd, + memoryEvents: make(map[int]func()), + } + + for _, monitor := range monitors { + memoryEvent, cb, err := monitor(cgroup) + if err != nil { + mc.Stop() + return nil, err + } + + efd, err := cgroup.RegisterMemoryEvent(memoryEvent) + if err != nil { + mc.Stop() + return nil, fmt.Errorf("can't register memory event: %w", err) + } + + var event syscall.EpollEvent + event.Events = syscall.EPOLLIN + event.Fd = int32(efd) + + if err := syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, int(efd), &event); err != nil { + mc.Stop() + return nil, fmt.Errorf("can't add file descriptor to epoll: %w", err) + } + + mc.memoryEvents[int(efd)] = cb + } + + return mc, nil +} + +// Start listening for events +func (mc *MemoryController) Start() { + go func() { + var buf [256]byte + var events [maxEpollEvents]syscall.EpollEvent + + EPOLLWAIT: + for { + nevents, err := syscall.EpollWait(mc.efd, events[:], -1) + if err != nil { + log.Warnf("Error while waiting for memory controller events: %v", err) + break + } + + for ev := 0; ev < nevents; ev++ { + fd := 
int(events[ev].Fd) + + if _, err := syscall.Read(fd, buf[:]); err != nil { + log.Warnf("Error while reading memory controller event: %v", err) + continue EPOLLWAIT + } + + mc.memoryEvents[fd]() + } + } + }() +} + +// Stop the memory controller +func (mc *MemoryController) Stop() { + for fd := range mc.memoryEvents { + syscall.Close(fd) + } + + if mc.efd != 0 { + syscall.Close(mc.efd) + } +} diff --git a/pkg/util/cloudproviders/cloudfoundry/cccache.go b/pkg/util/cloudproviders/cloudfoundry/cccache.go index cdd28ce3a9005..79325c80d02fc 100644 --- a/pkg/util/cloudproviders/cloudfoundry/cccache.go +++ b/pkg/util/cloudproviders/cloudfoundry/cccache.go @@ -15,7 +15,7 @@ import ( "sync" "time" - "github.com/cloudfoundry-community/go-cfclient" + "github.com/cloudfoundry-community/go-cfclient/v2" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -463,7 +463,7 @@ func (ccc *CCCache) listApplications(wg *sync.WaitGroup, appsMap *map[string]*cf sidecars, err := ccc.ccAPIClient.ListSidecarsByApp(query, app.GUID) if err != nil { log.Errorf("Failed listing sidecars from cloud controller: %v", err) - return + continue } // skip apps without sidecars if len(sidecars) == 0 { diff --git a/pkg/util/cloudproviders/cloudfoundry/cccache_test.go b/pkg/util/cloudproviders/cloudfoundry/cccache_test.go index 90f79d8294513..9d1c842f1ee50 100644 --- a/pkg/util/cloudproviders/cloudfoundry/cccache_test.go +++ b/pkg/util/cloudproviders/cloudfoundry/cccache_test.go @@ -15,7 +15,7 @@ import ( "testing" "time" - "github.com/cloudfoundry-community/go-cfclient" + "github.com/cloudfoundry-community/go-cfclient/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/util/cloudproviders/cloudfoundry/types.go b/pkg/util/cloudproviders/cloudfoundry/types.go index 78323829fd6f2..f002113422b1a 100644 --- a/pkg/util/cloudproviders/cloudfoundry/types.go +++ b/pkg/util/cloudproviders/cloudfoundry/types.go @@ -18,7 +18,7 @@ import ( "strconv" "strings" - "github.com/cloudfoundry-community/go-cfclient" + 
"github.com/cloudfoundry-community/go-cfclient/v2" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -583,7 +583,7 @@ func (c *CFClient) ListSidecarsByApp(query url.Values, appGUID string) ([]CFSide r := c.NewRequest("GET", requestURL+"?"+query.Encode()) resp, err := c.DoRequest(r) if err != nil { - return nil, fmt.Errorf("Error requesting sidecars for app: %s", err) + return nil, fmt.Errorf("Error requesting sidecars for app %s: %s", appGUID, err) } if resp.StatusCode != http.StatusOK { diff --git a/pkg/util/cloudproviders/cloudfoundry/types_test.go b/pkg/util/cloudproviders/cloudfoundry/types_test.go index 6c8d5df5a7678..7ef1c549da5d8 100644 --- a/pkg/util/cloudproviders/cloudfoundry/types_test.go +++ b/pkg/util/cloudproviders/cloudfoundry/types_test.go @@ -13,7 +13,7 @@ import ( "testing" "code.cloudfoundry.org/bbs/models" - "github.com/cloudfoundry-community/go-cfclient" + "github.com/cloudfoundry-community/go-cfclient/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/util/clusteragent/clusteragent.go b/pkg/util/clusteragent/clusteragent.go index 01488c1d2b349..d22331aee7581 100644 --- a/pkg/util/clusteragent/clusteragent.go +++ b/pkg/util/clusteragent/clusteragent.go @@ -344,7 +344,7 @@ func (c *DCAClient) doJSONQuery(ctx context.Context, path, method string, body i err = json.Unmarshal(respBody, obj) if err != nil { - return fmt.Errorf("failed to unmarshal JSON from URL: %s, err: %w", path, err) + return fmt.Errorf("failed to unmarshal JSON from URL: %s, err: %w, raw message: %q", path, err, respBody) } return nil diff --git a/pkg/util/containers/metrics/containerd/collector_cgroupv1.go b/pkg/util/containers/metrics/containerd/collector_cgroupv1.go index 8f76be6f01d11..b7f5b8c50ce2b 100644 --- a/pkg/util/containers/metrics/containerd/collector_cgroupv1.go +++ b/pkg/util/containers/metrics/containerd/collector_cgroupv1.go @@ -65,6 +65,7 @@ func getMemoryStatsCgroupV1(memStat *v1.MemoryStat) *provider.ContainerMemStats if memStat.Usage != nil { 
res.UsageTotal = pointer.Ptr(float64(memStat.Usage.Usage)) + res.WorkingSet = pointer.Ptr(float64(memStat.Usage.Usage - memStat.InactiveFile)) res.Limit = pointer.Ptr(float64(memStat.Usage.Limit)) } diff --git a/pkg/util/containers/metrics/containerd/collector_cgroupv2.go b/pkg/util/containers/metrics/containerd/collector_cgroupv2.go index a3e343b776841..fd1a0b9545e9b 100644 --- a/pkg/util/containers/metrics/containerd/collector_cgroupv2.go +++ b/pkg/util/containers/metrics/containerd/collector_cgroupv2.go @@ -54,6 +54,7 @@ func getMemoryStatsCgroupV2(memStat *v2.MemoryStat, memEvents *v2.MemoryEvents) res := provider.ContainerMemStats{ UsageTotal: pointer.Ptr(float64(memStat.Usage)), + WorkingSet: pointer.Ptr(float64(memStat.Usage - memStat.InactiveFile)), RSS: pointer.Ptr(float64(memStat.Anon)), Cache: pointer.Ptr(float64(memStat.File)), KernelMemory: pointer.Ptr(float64(memStat.Slab + memStat.KernelStack)), diff --git a/pkg/util/containers/metrics/containerd/collector_linux_test.go b/pkg/util/containers/metrics/containerd/collector_linux_test.go index 1fc6b81f9b239..db02d6470ede8 100644 --- a/pkg/util/containers/metrics/containerd/collector_linux_test.go +++ b/pkg/util/containers/metrics/containerd/collector_linux_test.go @@ -43,8 +43,9 @@ func TestGetContainerStats_Containerd(t *testing.T) { }, }, Memory: &v1.MemoryStat{ - Cache: 20, - RSS: 100, + Cache: 20, + RSS: 100, + InactiveFile: 10, Usage: &v1.MemoryEntry{ Limit: 2000, Usage: 1000, @@ -123,13 +124,14 @@ func TestGetContainerStats_Containerd(t *testing.T) { NrThrottled: 1, }, Memory: &v2.MemoryStat{ - File: 20, - Anon: 100, - Usage: 1000, - UsageLimit: 2000, - SwapUsage: 10, - Slab: 400, - KernelStack: 100, + File: 20, + Anon: 100, + InactiveFile: 10, + Usage: 1000, + UsageLimit: 2000, + SwapUsage: 10, + Slab: 400, + KernelStack: 100, }, Io: &v2.IOStat{ Usage: []*v2.IOEntry{ @@ -176,6 +178,7 @@ func TestGetContainerStats_Containerd(t *testing.T) { }, Memory: &provider.ContainerMemStats{ UsageTotal: 
pointer.Ptr(1000.0), + WorkingSet: pointer.Ptr(990.0), KernelMemory: pointer.Ptr(500.0), Limit: pointer.Ptr(2000.0), RSS: pointer.Ptr(100.0), @@ -220,6 +223,7 @@ func TestGetContainerStats_Containerd(t *testing.T) { }, Memory: &provider.ContainerMemStats{ UsageTotal: pointer.Ptr(1000.0), + WorkingSet: pointer.Ptr(990.0), KernelMemory: pointer.Ptr(500.0), Limit: pointer.Ptr(2000.0), RSS: pointer.Ptr(100.0), diff --git a/pkg/util/containers/metrics/cri/collector.go b/pkg/util/containers/metrics/cri/collector.go index 572757d535e8d..d6cd6d9ce8c8c 100644 --- a/pkg/util/containers/metrics/cri/collector.go +++ b/pkg/util/containers/metrics/cri/collector.go @@ -63,15 +63,25 @@ func (collector *criCollector) GetContainerStats(containerNS, containerID string return nil, err } - return &provider.ContainerStats{ - Timestamp: time.Now(), - CPU: &provider.ContainerCPUStats{ - Total: pointer.Ptr(float64(stats.GetCpu().GetUsageCoreNanoSeconds().GetValue())), - }, - Memory: &provider.ContainerMemStats{ - UsageTotal: pointer.Ptr(float64(stats.GetMemory().GetWorkingSetBytes().GetValue())), - }, - }, nil + containerStats := &provider.ContainerStats{} + + if stats.Cpu != nil { + containerStats.Timestamp = time.Unix(0, stats.Cpu.Timestamp) + containerStats.CPU = &provider.ContainerCPUStats{ + Total: convertRuntimeUInt64Value(stats.Cpu.UsageCoreNanoSeconds), + } + } + + if stats.Memory != nil { + containerStats.Timestamp = time.Unix(0, stats.Memory.Timestamp) + containerStats.Memory = &provider.ContainerMemStats{ + UsageTotal: convertRuntimeUInt64Value(stats.Memory.UsageBytes), + WorkingSet: convertRuntimeUInt64Value(stats.Memory.WorkingSetBytes), + RSS: convertRuntimeUInt64Value(stats.Memory.RssBytes), + } + } + + return containerStats, nil } // GetContainerOpenFilesCount returns open files count by container ID. 
@@ -106,3 +116,11 @@ func (collector *criCollector) getCriContainerStats(containerID string) (*v1.Con return stats, nil } + +func convertRuntimeUInt64Value(v *v1.UInt64Value) *float64 { + if v == nil { + return nil + } + + return pointer.Ptr(float64(v.GetValue())) +} diff --git a/pkg/util/containers/metrics/cri/collector_test.go b/pkg/util/containers/metrics/cri/collector_test.go index 490ef912020ca..97443fec86b80 100644 --- a/pkg/util/containers/metrics/cri/collector_test.go +++ b/pkg/util/containers/metrics/cri/collector_test.go @@ -36,6 +36,12 @@ func TestGetContainerStats(t *testing.T) { WorkingSetBytes: &pb.UInt64Value{ Value: 1024, }, + UsageBytes: &pb.UInt64Value{ + Value: 2048, + }, + RssBytes: &pb.UInt64Value{ + Value: 512, + }, }, }, nil, @@ -49,7 +55,9 @@ func TestGetContainerStats(t *testing.T) { assert.NoError(t, err) assert.Equal(t, pointer.Ptr(1000.0), stats.CPU.Total) - assert.Equal(t, pointer.Ptr(1024.0), stats.Memory.UsageTotal) + assert.Equal(t, pointer.Ptr(1024.0), stats.Memory.WorkingSet) + assert.Equal(t, pointer.Ptr(2048.0), stats.Memory.UsageTotal) + assert.Equal(t, pointer.Ptr(512.0), stats.Memory.RSS) } func TestGetContainerNetworkStats(t *testing.T) { diff --git a/pkg/util/containers/metrics/docker/collector_linux.go b/pkg/util/containers/metrics/docker/collector_linux.go index 08edfaa0d1f67..8dd829d894f09 100644 --- a/pkg/util/containers/metrics/docker/collector_linux.go +++ b/pkg/util/containers/metrics/docker/collector_linux.go @@ -46,21 +46,21 @@ func convertMemoryStats(memStats *types.MemoryStats) *provider.ContainerMemStats UsageTotal: pointer.Ptr(float64(memStats.Usage)), Limit: pointer.Ptr(float64(memStats.Limit)), OOMEvents: pointer.Ptr(float64(memStats.Failcnt)), + // keys are cgroupv1, cgroupv2 + RSS: getFieldFromMap(memStats.Stats, "total_rss", "anon"), + Cache: getFieldFromMap(memStats.Stats, "total_cache", "file"), } - if rss, found := memStats.Stats["rss"]; found { - containerMemStats.RSS = pointer.Ptr(float64(rss)) - } - - 
if cache, found := memStats.Stats["cache"]; found { - containerMemStats.Cache = pointer.Ptr(float64(cache)) + inactive_file := getFieldFromMap(memStats.Stats, "total_inactive_file", "inactive_file") + if inactive_file != nil { + containerMemStats.WorkingSet = pointer.Ptr(*containerMemStats.UsageTotal - *inactive_file) } // `kernel_stack` and `slab`, which are used to compute `KernelMemory` are available only with cgroup v2 - if kernelStack, found := memStats.Stats["kernel_stack"]; found { - if slab, found := memStats.Stats["slab"]; found { - containerMemStats.KernelMemory = pointer.Ptr(float64(kernelStack + slab)) - } + kernelStack := getFieldFromMap(memStats.Stats, "", "kernel_stack") + slab := getFieldFromMap(memStats.Stats, "", "slab") + if kernelStack != nil && slab != nil { + containerMemStats.KernelMemory = pointer.Ptr(*kernelStack + *slab) } return containerMemStats @@ -160,3 +160,23 @@ func computeCPULimit(containerStats *provider.ContainerStats, spec *types.Contai containerStats.CPU.Limit = &cpuLimit } + +// keyV1 is key name from cgroup V1 +// keyV2 is key name from cgroup V2 +func getFieldFromMap(stats map[string]uint64, keys ...string) *float64 { + var val uint64 + var found bool + + for _, key := range keys { + val, found = stats[key] + if found { + break + } + } + + if !found { + return nil + } + + return pointer.Ptr(float64(val)) +} diff --git a/pkg/util/containers/metrics/docker/collector_linux_test.go b/pkg/util/containers/metrics/docker/collector_linux_test.go index 3c7457e4bab91..964bb748c4b8b 100644 --- a/pkg/util/containers/metrics/docker/collector_linux_test.go +++ b/pkg/util/containers/metrics/docker/collector_linux_test.go @@ -70,8 +70,8 @@ func Test_convertMemoryStats(t *testing.T) { Limit: 43, Failcnt: 44, Stats: map[string]uint64{ - "rss": 45, - "cache": 46, + "total_rss": 45, + "total_cache": 46, "kernel_stack": 47, "slab": 48, }, diff --git a/pkg/util/containers/metrics/kubelet/collector.go 
b/pkg/util/containers/metrics/kubelet/collector.go index a4e415244dba9..b7e215669087e 100644 --- a/pkg/util/containers/metrics/kubelet/collector.go +++ b/pkg/util/containers/metrics/kubelet/collector.go @@ -257,10 +257,12 @@ func convertContainerStats(kubeContainerStats *v1alpha1.ContainerStats, outConta RSS: pointer.UIntPtrToFloatPtr(kubeContainerStats.Memory.RSSBytes), } - // On Linux `UsageBytes` is set. On Windows only `WorkingSetBytes` is set - if outContainerStats.Memory.UsageTotal == nil && kubeContainerStats.Memory.WorkingSetBytes != nil { + // On Linux `RSS` is set. On Windows only `WorkingSetBytes` is set + if outContainerStats.Memory.RSS == nil { outContainerStats.Memory.UsageTotal = pointer.UIntPtrToFloatPtr(kubeContainerStats.Memory.WorkingSetBytes) outContainerStats.Memory.PrivateWorkingSet = pointer.UIntPtrToFloatPtr(kubeContainerStats.Memory.WorkingSetBytes) + } else { + outContainerStats.Memory.WorkingSet = pointer.UIntPtrToFloatPtr(kubeContainerStats.Memory.WorkingSetBytes) } } } diff --git a/pkg/util/containers/metrics/kubelet/collector_test.go b/pkg/util/containers/metrics/kubelet/collector_test.go index 98ed7c8dc40ed..647ba1dcf6e94 100644 --- a/pkg/util/containers/metrics/kubelet/collector_test.go +++ b/pkg/util/containers/metrics/kubelet/collector_test.go @@ -73,6 +73,7 @@ func TestKubeletCollectorLinux(t *testing.T) { Memory: &provider.ContainerMemStats{ UsageTotal: pointer.Ptr(12713984.0), RSS: pointer.Ptr(12238848.0), + WorkingSet: pointer.Ptr(12713984.0), }, }, cID1Stats) @@ -88,6 +89,7 @@ func TestKubeletCollectorLinux(t *testing.T) { Memory: &provider.ContainerMemStats{ UsageTotal: pointer.Ptr(6705152.0), RSS: pointer.Ptr(6119424.0), + WorkingSet: pointer.Ptr(6705152.0), }, }, cID2Stats) @@ -103,6 +105,7 @@ func TestKubeletCollectorLinux(t *testing.T) { Memory: &provider.ContainerMemStats{ UsageTotal: pointer.Ptr(11325440.0), RSS: pointer.Ptr(10797056.0), + WorkingSet: pointer.Ptr(11325440.0), }, }, cID3Stats) diff --git 
a/pkg/util/containers/metrics/mock/mock_samples.go b/pkg/util/containers/metrics/mock/mock_samples.go index 7be961e34dd89..40cd64c05a568 100644 --- a/pkg/util/containers/metrics/mock/mock_samples.go +++ b/pkg/util/containers/metrics/mock/mock_samples.go @@ -47,6 +47,7 @@ func GetFullSampleContainerEntry() ContainerEntry { SwapLimit: pointer.Ptr(500.0), Softlimit: pointer.Ptr(40000.0), RSS: pointer.Ptr(300.0), + WorkingSet: pointer.Ptr(350.0), Cache: pointer.Ptr(200.0), Swap: pointer.Ptr(0.0), OOMEvents: pointer.Ptr(10.0), diff --git a/pkg/util/containers/metrics/provider/types.go b/pkg/util/containers/metrics/provider/types.go index cd9474dcc0624..b79cced920713 100644 --- a/pkg/util/containers/metrics/provider/types.go +++ b/pkg/util/containers/metrics/provider/types.go @@ -22,6 +22,7 @@ type ContainerMemStats struct { SwapLimit *float64 // Memory+Swap Limit (>= Limit) // Linux-only fields + WorkingSet *float64 // Following cAdvisor/Kubernetes: defined as UsageTotal - InactiveFiles RSS *float64 Cache *float64 OOMEvents *float64 // Number of events where memory allocation failed diff --git a/pkg/util/containers/metrics/system/collector_linux.go b/pkg/util/containers/metrics/system/collector_linux.go index 8e8adaea2134e..550ad94db3e28 100644 --- a/pkg/util/containers/metrics/system/collector_linux.go +++ b/pkg/util/containers/metrics/system/collector_linux.go @@ -232,6 +232,11 @@ func buildMemoryStats(cgs *cgroups.MemoryStats) *provider.ContainerMemStats { convertField(cgs.OOMEvents, &cs.OOMEvents) convertFieldAndUnit(cgs.PSISome.Total, &cs.PartialStallTime, float64(time.Microsecond)) + // Compute complex fields + if cgs.UsageTotal != nil && cgs.InactiveFile != nil { + cs.WorkingSet = pointer.Ptr(float64(*cgs.UsageTotal - *cgs.InactiveFile)) + } + return cs } diff --git a/pkg/util/containers/metrics/system/collector_linux_test.go b/pkg/util/containers/metrics/system/collector_linux_test.go index 032d75862c0f3..1d2097c893751 100644 --- 
a/pkg/util/containers/metrics/system/collector_linux_test.go +++ b/pkg/util/containers/metrics/system/collector_linux_test.go @@ -63,6 +63,7 @@ func TestBuildContainerMetrics(t *testing.T) { LowThreshold: pointer.Ptr(uint64(40000)), RSS: pointer.Ptr(uint64(300)), Cache: pointer.Ptr(uint64(200)), + InactiveFile: pointer.Ptr(uint64(10)), Swap: pointer.Ptr(uint64(0)), SwapLimit: pointer.Ptr(uint64(500)), OOMEvents: pointer.Ptr(uint64(10)), @@ -108,6 +109,7 @@ func TestBuildContainerMetrics(t *testing.T) { }, Memory: &provider.ContainerMemStats{ UsageTotal: pointer.Ptr(100.0), + WorkingSet: pointer.Ptr(90.0), KernelMemory: pointer.Ptr(40.0), Limit: pointer.Ptr(42000.0), Softlimit: pointer.Ptr(40000.0), diff --git a/pkg/util/docker/event_stream.go b/pkg/util/docker/event_stream.go index 667b8625ac07c..0fe7c8a10ff59 100644 --- a/pkg/util/docker/event_stream.go +++ b/pkg/util/docker/event_stream.go @@ -157,7 +157,7 @@ func eventFilters() filters.Args { res.Add("event", containerEventAction) } - if config.Datadog.GetBool("container_image_collection.metadata.enabled") { + if config.Datadog.GetBool("container_image.enabled") { res.Add("type", events.ImageEventType) for _, imageEventAction := range imageEventActions { res.Add("event", imageEventAction) diff --git a/pkg/util/docker/util_common.go b/pkg/util/docker/util_common.go index 520e15bbdf7a5..30a2b37ea6d04 100644 --- a/pkg/util/docker/util_common.go +++ b/pkg/util/docker/util_common.go @@ -11,7 +11,7 @@ import ( var ( // ErrNotImplemented is the "not implemented" error given by `gopsutil` when an - // OS doesn't support and API. Unfortunately it's in an internal package so + // OS doesn't support an API. Unfortunately it's in an internal package so // we can't import it so we'll copy it here. 
ErrNotImplemented = errors.New("not implemented yet") diff --git a/pkg/util/docker/util_docker.go b/pkg/util/docker/util_docker.go index bf23fe24795be..bb1721be6bb6c 100644 --- a/pkg/util/docker/util_docker.go +++ b/pkg/util/docker/util_docker.go @@ -11,20 +11,21 @@ import ( "fmt" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/volume" ) // buildDockerFilter creates a filter.Args object from an even // number of strings, used as key, value pairs // An empty "catch-all" filter can be created by passing no argument -func buildDockerFilter(args ...string) (filters.Args, error) { +func buildDockerFilter(args ...string) (volume.ListOptions, error) { filter := filters.NewArgs() if len(args)%2 != 0 { - return filter, fmt.Errorf("an even number of arguments is required") + return volume.ListOptions{Filters: filter}, fmt.Errorf("an even number of arguments is required") } for i := 0; i < len(args); i += 2 { filter.Add(args[i], args[i+1]) } - return filter, nil + return volume.ListOptions{Filters: filter}, nil } // GetInspectCacheKey returns the key to a given container ID inspect in the agent cache diff --git a/pkg/util/docker/util_docker_test.go b/pkg/util/docker/util_docker_test.go index 4c99b8740c6c9..af46c57fcab2c 100644 --- a/pkg/util/docker/util_docker_test.go +++ b/pkg/util/docker/util_docker_test.go @@ -14,21 +14,21 @@ import ( ) func TestBuildDockerFilterOddNumber(t *testing.T) { - filter, err := buildDockerFilter("test") + opt, err := buildDockerFilter("test") assert.NotNil(t, err) - assert.Equal(t, 0, filter.Len()) + assert.Equal(t, 0, opt.Filters.Len()) } func TestBuildDockerFilterOK(t *testing.T) { - filter, err := buildDockerFilter("k1", "v1", "k2", "v2") + opt, err := buildDockerFilter("k1", "v1", "k2", "v2") assert.Nil(t, err) - assert.Equal(t, 2, filter.Len()) - assert.Equal(t, []string{"v1"}, filter.Get("k1")) - assert.Equal(t, []string{"v2"}, filter.Get("k2")) + assert.Equal(t, 2, opt.Filters.Len()) + assert.Equal(t, 
[]string{"v1"}, opt.Filters.Get("k1")) + assert.Equal(t, []string{"v2"}, opt.Filters.Get("k2")) } func TestBuildDockerFilterEmptyOK(t *testing.T) { - filter, err := buildDockerFilter() + opt, err := buildDockerFilter() assert.Nil(t, err) - assert.Equal(t, 0, filter.Len()) + assert.Equal(t, 0, opt.Filters.Len()) } diff --git a/pkg/util/http/transport.go b/pkg/util/http/transport.go index a088e0e0bb30a..2a4f72796b9a2 100644 --- a/pkg/util/http/transport.go +++ b/pkg/util/http/transport.go @@ -122,7 +122,7 @@ func CreateHTTPTransport() *http.Transport { MaxIdleConns: 100, MaxIdleConnsPerHost: 5, // This parameter is set to avoid connections sitting idle in the pool indefinitely - IdleConnTimeout: 90 * time.Second, + IdleConnTimeout: 45 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, } diff --git a/pkg/util/kernel/version.go b/pkg/util/kernel/version.go index 6b5a0ef5a554f..e0a85e026ffec 100644 --- a/pkg/util/kernel/version.go +++ b/pkg/util/kernel/version.go @@ -49,6 +49,15 @@ func HostVersion() (Version, error) { return Version(lvc), nil } +// MustHostVersion returns the running kernel version of the host +func MustHostVersion() Version { + lvc, err := features.LinuxVersionCode() + if err != nil { + panic(err) + } + return Version(lvc) +} + // ParseVersion parses a string in the format of x.x.x to a Version func ParseVersion(s string) Version { var a, b, c byte diff --git a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go index 07de956ae2034..71d78d6c5a332 100644 --- a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go +++ b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go @@ -11,10 +11,10 @@ import ( "context" "encoding/json" "fmt" - "os" "sync" "time" + "golang.org/x/mod/semver" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" coordinationv1 
"k8s.io/client-go/kubernetes/typed/coordination/v1" @@ -113,9 +113,9 @@ func (le *LeaderEngine) init() error { var err error if le.HolderIdentity == "" { - le.HolderIdentity, err = os.Hostname() + le.HolderIdentity, err = getSelfPodName() if err != nil { - log.Debugf("cannot get hostname: %s", err) + log.Debugf("cannot get pod name: %s", err) return err } } @@ -136,6 +136,11 @@ func (le *LeaderEngine) init() error { return err } + serverVersion, err := common.KubeServerVersion(apiClient.DiscoveryCl, 10*time.Second) + if err == nil && semver.IsValid(serverVersion.String()) && semver.Compare(serverVersion.String(), "v1.14.0") < 0 { + log.Warn("[DEPRECATION WARNING] DataDog will drop support of Kubernetes older than v1.14. Please update to a newer version to ensure proper functionality and security.") + } + le.coreClient = apiClient.Cl.CoreV1() // Will be required once we migrate to Kubernetes deps >= 0.24 le.coordClient = nil diff --git a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection_linux.go b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection_linux.go new file mode 100644 index 0000000000000..4aabf90c839b6 --- /dev/null +++ b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection_linux.go @@ -0,0 +1,41 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023-present Datadog, Inc. 
+ +//go:build kubeapiserver && linux + +package leaderelection + +import ( + "fmt" + "os" + + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/util/system" +) + +func getSelfPodName() (string, error) { + if podName, ok := os.LookupEnv("DD_POD_NAME"); ok { + return podName, nil + } + + selfUTSInode, err := system.GetProcessNamespaceInode("/proc", "self", "uts") + if err != nil { + // If we are not able to gather our own UTS Inode, in doubt, authorize fallback to `os.Hostname()` + log.Warnf("Unable to get self UTS inode") + return os.Hostname() + } + + hostUTS := system.IsProcessHostUTSNamespace("/proc", selfUTSInode) + if hostUTS == nil { + // In doubt, authorize fallback to `os.Hostname()` + return os.Hostname() + } + + if *hostUTS { + return "", fmt.Errorf("DD_POD_NAME is not set and running in host UTS namespace; cannot reliably determine self pod name") + } + + return os.Hostname() +} diff --git a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection_nolinux.go b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection_nolinux.go new file mode 100644 index 0000000000000..851c4dcd1b9b8 --- /dev/null +++ b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection_nolinux.go @@ -0,0 +1,20 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023-present Datadog, Inc. 
+ +//go:build kubeapiserver && !linux + +package leaderelection + +import ( + "os" +) + +func getSelfPodName() (string, error) { + if podName, ok := os.LookupEnv("DD_POD_NAME"); ok { + return podName, nil + } + + return os.Hostname() +} diff --git a/pkg/util/kubernetes/apiserver/metadata_controller.go b/pkg/util/kubernetes/apiserver/metadata_controller.go index 9789878398046..31a5aead88e7a 100644 --- a/pkg/util/kubernetes/apiserver/metadata_controller.go +++ b/pkg/util/kubernetes/apiserver/metadata_controller.go @@ -315,38 +315,6 @@ func GetPodMetadataNames(nodeName, ns, podName string) ([]string, error) { return metaList, nil } -func getNode(as *APIClient, nodeName string) (*corev1.Node, error) { - if !config.Datadog.GetBool("kubernetes_collect_metadata_tags") { - return nil, log.Errorf("Metadata collection is disabled on the Cluster Agent") - } - node, err := as.InformerFactory.Core().V1().Nodes().Lister().Get(nodeName) - if err != nil { - return nil, err - } - if node == nil { - return nil, fmt.Errorf("cannot get node %s from the informer's cache", nodeName) - } - return node, nil -} - -// GetNodeLabels retrieves the labels of the queried node from the cache of the shared informer. -func GetNodeLabels(as *APIClient, nodeName string) (map[string]string, error) { - node, err := getNode(as, nodeName) - if err != nil { - return nil, err - } - return node.Labels, nil -} - -// GetNodeAnnotations retrieves the annotations of the queried node from the cache of the shared informer. -func GetNodeAnnotations(as *APIClient, nodeName string) (map[string]string, error) { - node, err := getNode(as, nodeName) - if err != nil { - return nil, err - } - return node.Annotations, nil -} - // GetNamespaceLabels retrieves the labels of the queried namespace from the cache of the shared informer. 
func GetNamespaceLabels(nsName string) (map[string]string, error) { if !config.Datadog.GetBool("kubernetes_collect_metadata_tags") { diff --git a/pkg/util/kubernetes/kubelet/kubelet_common.go b/pkg/util/kubernetes/kubelet/kubelet_common.go index 1137cc180f021..6c18062856e9a 100644 --- a/pkg/util/kubernetes/kubelet/kubelet_common.go +++ b/pkg/util/kubernetes/kubelet/kubelet_common.go @@ -31,6 +31,12 @@ var ( // KubePodTaggerEntityPrefix is the tagger entity prefix for Kubernetes pods KubePodTaggerEntityPrefix = KubePodTaggerEntityName + containers.EntitySeparator + + // KubeNodeTaggerEntityName is the tagger entity name for Kubernetes nodes + KubeNodeTaggerEntityName = "kubernetes_node_uid" + + // KubeNodeTaggerEntityPrefix is the tagger entity prefix for Kubernetes pods + KubeNodeTaggerEntityPrefix = KubeNodeTaggerEntityName + containers.EntitySeparator ) // PodUIDToEntityName returns a prefixed entity name from a pod UID @@ -49,6 +55,14 @@ func PodUIDToTaggerEntityName(uid string) string { return KubePodTaggerEntityPrefix + uid } +// NodeUIDToTaggerEntityName returns a prefixed tagger entity name from a node UID +func NodeUIDToTaggerEntityName(uid string) string { + if uid == "" { + return "" + } + return KubeNodeTaggerEntityPrefix + uid +} + // ParseMetricFromRaw parses a metric from raw prometheus text func ParseMetricFromRaw(raw []byte, metric string) (string, error) { bytesReader := bytes.NewReader(raw) diff --git a/pkg/util/log/go.mod b/pkg/util/log/go.mod index 54b499786178e..c3a794c44518c 100644 --- a/pkg/util/log/go.mod +++ b/pkg/util/log/go.mod @@ -5,7 +5,7 @@ go 1.18 replace github.com/DataDog/datadog-agent/pkg/util/scrubber => ../scrubber require ( - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.45.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.46.0-rc.2 github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 github.com/stretchr/testify v1.8.1 go.uber.org/zap v1.22.0 diff --git a/pkg/util/process_file_stats_others.go 
b/pkg/util/process_file_stats_others.go index 1af2b27abeb1c..86d4e17210bc8 100644 --- a/pkg/util/process_file_stats_others.go +++ b/pkg/util/process_file_stats_others.go @@ -10,7 +10,7 @@ package util import "errors" // ErrNotImplemented is the "not implemented" error given by `gopsutil` when an -// OS doesn't support and API. Unfortunately it's in an internal package so +// OS doesn't support an API. Unfortunately it's in an internal package so // we can't import it so we'll copy it here. var ErrNotImplemented = errors.New("not implemented yet") diff --git a/pkg/proto/utils/tagger.go b/pkg/util/proto/tagger.go similarity index 99% rename from pkg/proto/utils/tagger.go rename to pkg/util/proto/tagger.go index 4edc5eeedd408..74c269ec41174 100644 --- a/pkg/proto/utils/tagger.go +++ b/pkg/util/proto/tagger.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-2020 Datadog, Inc. -package utils +package proto import ( "errors" diff --git a/pkg/proto/utils/workloadmeta.go b/pkg/util/proto/workloadmeta.go similarity index 99% rename from pkg/proto/utils/workloadmeta.go rename to pkg/util/proto/workloadmeta.go index c69dd64be1d50..0d9d9f1249990 100644 --- a/pkg/proto/utils/workloadmeta.go +++ b/pkg/util/proto/workloadmeta.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-2020 Datadog, Inc. -package utils +package proto import ( "fmt" diff --git a/pkg/proto/utils/workloadmeta_test.go b/pkg/util/proto/workloadmeta_test.go similarity index 99% rename from pkg/proto/utils/workloadmeta_test.go rename to pkg/util/proto/workloadmeta_test.go index ebaeb28aab12c..0ef1818e7f1bf 100644 --- a/pkg/proto/utils/workloadmeta_test.go +++ b/pkg/util/proto/workloadmeta_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-2020 Datadog, Inc. 
-package utils +package proto import ( "testing" diff --git a/pkg/util/trivy/containerd.go b/pkg/util/trivy/containerd.go index 39a6f2579b0f4..e9d1709ef89ab 100644 --- a/pkg/util/trivy/containerd.go +++ b/pkg/util/trivy/containerd.go @@ -20,7 +20,6 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/images/archive" "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/platforms" refdocker "github.com/containerd/containerd/reference/docker" api "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" @@ -62,7 +61,7 @@ func imageWriter(client *containerd.Client, img containerd.Image) imageSave { } imgOpts := archive.WithImage(client.ImageService(), ref[0]) manifestOpts := archive.WithManifest(img.Target()) - platOpts := archive.WithPlatform(platforms.DefaultStrict()) + platOpts := archive.WithPlatform(img.Platform()) pr, pw := io.Pipe() go func() { pw.CloseWithError(archive.Export(ctx, client.ContentStore(), pw, imgOpts, manifestOpts, platOpts)) diff --git a/pkg/util/trivy/trivy.go b/pkg/util/trivy/trivy.go index d619e94d4938f..9c3ae25fae423 100644 --- a/pkg/util/trivy/trivy.go +++ b/pkg/util/trivy/trivy.go @@ -102,7 +102,7 @@ func defaultCollectorConfig(cacheLocation string) CollectorConfig { ClearCacheOnClose: true, } - collectorConfig.CacheProvider = cacheProvider(cacheLocation, config.Datadog.GetBool("sbom.use_custom_cache")) + collectorConfig.CacheProvider = cacheProvider(cacheLocation, config.Datadog.GetBool("sbom.cache.enabled")) return collectorConfig } @@ -112,8 +112,8 @@ func cacheProvider(cacheLocation string, useCustomCache bool) func() (cache.Cach return func() (cache.Cache, CacheCleaner, error) { return NewCustomBoltCache( cacheLocation, - config.Datadog.GetInt("sbom.custom_cache_max_cache_entries"), - config.Datadog.GetInt("sbom.custom_cache_max_disk_size"), + config.Datadog.GetInt("sbom.cache.max_cache_entries"), + 
config.Datadog.GetInt("sbom.cache.max_disk_size"), ) } } diff --git a/pkg/workloadmeta/README.md b/pkg/workloadmeta/README.md index bbcdbf015b922..601fc1b282c21 100644 --- a/pkg/workloadmeta/README.md +++ b/pkg/workloadmeta/README.md @@ -26,7 +26,7 @@ When this occurs, information from those sources is merged into one entity. The _Store_ is the central component of the package, storing the set of entities. A store has a set of _collectors_ responsible for notifying the store of workload changes. -Each collector is specialized to a particular external service such as Kuberntes or ECS, roughly corresponding to a source. +Each collector is specialized to a particular external service such as Kubernetes or ECS, roughly corresponding to a source. Collectors can either poll for updates, or translate a stream of events from the external service, as appropriate. The store provides information to other components either through subscriptions or by querying the current state. diff --git a/pkg/workloadmeta/collectors/internal/containerd/containerd.go b/pkg/workloadmeta/collectors/internal/containerd/containerd.go index a7f1e74f53fe5..89e7ef3c9dc55 100644 --- a/pkg/workloadmeta/collectors/internal/containerd/containerd.go +++ b/pkg/workloadmeta/collectors/internal/containerd/containerd.go @@ -395,5 +395,5 @@ func (c *collector) cacheExitInfo(id string, exitCode *uint32, exitTS time.Time) } func imageMetadataCollectionIsEnabled() bool { - return config.Datadog.GetBool("container_image_collection.metadata.enabled") + return config.Datadog.GetBool("container_image.enabled") } diff --git a/pkg/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go b/pkg/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go index b3b46726cae8f..509858561cb3a 100644 --- a/pkg/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go +++ b/pkg/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go @@ -20,7 +20,7 @@ import ( ) func sbomCollectionIsEnabled() bool { - 
return imageMetadataCollectionIsEnabled() && config.Datadog.GetBool("container_image_collection.sbom.enabled") + return imageMetadataCollectionIsEnabled() && config.Datadog.GetBool("sbom.container_image.enabled") } func (c *collector) startSBOMCollection(ctx context.Context) error { @@ -114,7 +114,7 @@ func (c *collector) extractSBOMWithTrivy(ctx context.Context, storedImage *workl Image: containerdImage, ImageMeta: storedImage, ContainerdClient: c.containerdClient, - FromFilesystem: config.Datadog.GetBool("container_image_collection.sbom.use_mount"), + FromFilesystem: config.Datadog.GetBool("sbom.container_image.use_mount"), } if err = c.sbomScanner.Scan(scanRequest, c.scanOptions, resultChan); err != nil { log.Errorf("Failed to trigger SBOM generation for containerd: %s", err) diff --git a/pkg/workloadmeta/collectors/internal/docker/image_sbom_trivy.go b/pkg/workloadmeta/collectors/internal/docker/image_sbom_trivy.go index 1627e4b1cfcab..45992d9bc7620 100644 --- a/pkg/workloadmeta/collectors/internal/docker/image_sbom_trivy.go +++ b/pkg/workloadmeta/collectors/internal/docker/image_sbom_trivy.go @@ -22,11 +22,11 @@ import ( ) func imageMetadataCollectionIsEnabled() bool { - return config.Datadog.GetBool("container_image_collection.metadata.enabled") + return config.Datadog.GetBool("container_image.enabled") } func sbomCollectionIsEnabled() bool { - return imageMetadataCollectionIsEnabled() && config.Datadog.GetBool("container_image_collection.sbom.enabled") + return imageMetadataCollectionIsEnabled() && config.Datadog.GetBool("sbom.container_image.enabled") } func (c *collector) startSBOMCollection(ctx context.Context) error { diff --git a/pkg/workloadmeta/collectors/internal/kubeapiserver/kubeapiserver.go b/pkg/workloadmeta/collectors/internal/kubeapiserver/kubeapiserver.go index 3d7da69806590..9f81ca6ea9141 100644 --- a/pkg/workloadmeta/collectors/internal/kubeapiserver/kubeapiserver.go +++ b/pkg/workloadmeta/collectors/internal/kubeapiserver/kubeapiserver.go 
@@ -18,7 +18,6 @@ import ( "k8s.io/client-go/tools/cache" "github.com/DataDog/datadog-agent/pkg/config" - "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/workloadmeta" ) @@ -38,36 +37,52 @@ func init() { } func (c *collector) Start(ctx context.Context, wlmetaStore workloadmeta.Store) error { - if !config.Datadog.GetBool("cluster_agent.collect_kubernetes_tags") { - return errors.NewDisabled(componentName, "Cluster Agent tag collection is disabled, disabling kubeapiserver collector") - } - apiserverClient, err := apiserver.GetAPIClient() if err != nil { return err } client := apiserverClient.Cl - namespace := metav1.NamespaceAll - listerWatcher := &cache.ListWatch{ + nodeListerWatcher := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return client.CoreV1().Pods(namespace).List(ctx, options) + return client.CoreV1().Nodes().List(ctx, options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return client.CoreV1().Pods(namespace).Watch(ctx, options) + return client.CoreV1().Nodes().Watch(ctx, options) }, } - reflector := cache.NewNamedReflector( + nodeReflector := cache.NewNamedReflector( componentName, - listerWatcher, - &corev1.Pod{}, - newReflectorStore(wlmetaStore), + nodeListerWatcher, + &corev1.Node{}, + newNodeReflectorStore(wlmetaStore), noResync, ) - go reflector.Run(ctx.Done()) + go nodeReflector.Run(ctx.Done()) + + if config.Datadog.GetBool("cluster_agent.collect_kubernetes_tags") { + podListerWatcher := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return client.CoreV1().Pods(metav1.NamespaceAll).List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.CoreV1().Pods(metav1.NamespaceAll).Watch(ctx, options) + }, + } + + podReflector := cache.NewNamedReflector( + componentName, + 
podListerWatcher, + &corev1.Pod{}, + newPodReflectorStore(wlmetaStore), + noResync, + ) + + go podReflector.Run(ctx.Done()) + } return nil } diff --git a/pkg/workloadmeta/collectors/internal/kubeapiserver/reflector_store.go b/pkg/workloadmeta/collectors/internal/kubeapiserver/reflector_store.go index d418cca0b9591..6435f174f8752 100644 --- a/pkg/workloadmeta/collectors/internal/kubeapiserver/reflector_store.go +++ b/pkg/workloadmeta/collectors/internal/kubeapiserver/reflector_store.go @@ -13,6 +13,7 @@ import ( "sync" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" utilserror "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/cache" @@ -24,34 +25,49 @@ import ( type reflectorStore struct { wlmetaStore workloadmeta.Store - mu sync.Mutex - seen map[string]workloadmeta.EntityID + mu sync.Mutex + seen map[string]workloadmeta.EntityID +} + +type podReflectorStore struct { + reflectorStore + options *parseOptions } -func newReflectorStore(wlmetaStore workloadmeta.Store) cache.Store { +type nodeReflectorStore struct { + reflectorStore +} + +func newPodReflectorStore(wlmetaStore workloadmeta.Store) cache.Store { annotationsExclude := config.Datadog.GetStringSlice("cluster_agent.kubernetes_resources_collection.pod_annotations_exclude") parseOptions, err := newParseOptions(annotationsExclude) if err != nil { _ = log.Errorf("unable to parse all pod_annotations_exclude: %v, err:", err) } - return &reflectorStore{ - wlmetaStore: wlmetaStore, - seen: make(map[string]workloadmeta.EntityID), - options: parseOptions, + return &podReflectorStore{ + reflectorStore: reflectorStore{ + wlmetaStore: wlmetaStore, + seen: make(map[string]workloadmeta.EntityID), + }, + options: parseOptions, } } -// Add notifies the workloadmeta store with an EventTypeSet for the given -// object. 
-func (r *reflectorStore) Add(obj interface{}) error { +func newNodeReflectorStore(wlmetaStore workloadmeta.Store) cache.Store { + return &nodeReflectorStore{ + reflectorStore: reflectorStore{ + wlmetaStore: wlmetaStore, + seen: make(map[string]workloadmeta.EntityID), + }, + } +} + +func (r *reflectorStore) add(uid types.UID, entity workloadmeta.Entity) error { r.mu.Lock() defer r.mu.Unlock() - pod := obj.(*corev1.Pod) - entity := parsePod(pod, r.options) - - r.seen[string(pod.UID)] = entity.EntityID + r.seen[string(uid)] = entity.GetID() r.wlmetaStore.Notify([]workloadmeta.CollectorEvent{ { @@ -64,9 +80,33 @@ func (r *reflectorStore) Add(obj interface{}) error { return nil } +// Add notifies the workloadmeta store with an EventTypeSet for the given +// object. +func (r *podReflectorStore) Add(obj interface{}) error { + pod := obj.(*corev1.Pod) + entity := parsePod(pod, r.options) + + return r.add(pod.UID, entity) +} + +// Add notifies the workloadmeta store with an EventTypeSet for the given +// object. +func (r *nodeReflectorStore) Add(obj interface{}) error { + node := obj.(*corev1.Node) + entity := parseNode(node) + + return r.add(node.UID, entity) +} + +// Update notifies the workloadmeta store with an EventTypeSet for the given +// object. +func (r *podReflectorStore) Update(obj interface{}) error { + return r.Add(obj) +} + // Update notifies the workloadmeta store with an EventTypeSet for the given // object. 
-func (r *reflectorStore) Update(obj interface{}) error { +func (r *nodeReflectorStore) Update(obj interface{}) error { return r.Add(obj) } @@ -76,9 +116,17 @@ func (r *reflectorStore) Delete(obj interface{}) error { r.mu.Lock() defer r.mu.Unlock() - pod := obj.(*corev1.Pod) + var kind workloadmeta.Kind + var uid types.UID + if pod, ok := obj.(*corev1.Pod); ok { + kind = workloadmeta.KindKubernetesPod + uid = pod.UID + } else if node, ok := obj.(*corev1.Node); ok { + kind = workloadmeta.KindKubernetesNode + uid = node.UID + } - delete(r.seen, string(pod.UID)) + delete(r.seen, string(uid)) r.wlmetaStore.Notify([]workloadmeta.CollectorEvent{ { @@ -86,8 +134,8 @@ func (r *reflectorStore) Delete(obj interface{}) error { Source: collectorID, Entity: &workloadmeta.KubernetesPod{ EntityID: workloadmeta.EntityID{ - Kind: workloadmeta.KindKubernetesPod, - ID: string(pod.UID), + Kind: kind, + ID: string(uid), }, }, }, @@ -96,9 +144,14 @@ func (r *reflectorStore) Delete(obj interface{}) error { return nil } +type entityUid struct { + entity workloadmeta.Entity + uid types.UID +} + // Replace diffs the given list with the contents of the workloadmeta store // (through r.seen), and updates and deletes the necessary objects. 
-func (r *reflectorStore) Replace(list []interface{}, _ string) error { +func (r *reflectorStore) replace(entities []entityUid) error { r.mu.Lock() defer r.mu.Unlock() @@ -107,10 +160,9 @@ func (r *reflectorStore) Replace(list []interface{}, _ string) error { seenNow := make(map[string]workloadmeta.EntityID) seenBefore := r.seen - for _, obj := range list { - pod := obj.(*corev1.Pod) - podUID := string(pod.UID) - entity := parsePod(pod, r.options) + for _, entityuid := range entities { + entity := entityuid.entity + uid := string(entityuid.uid) events = append(events, workloadmeta.CollectorEvent{ Type: workloadmeta.EventTypeSet, @@ -118,11 +170,11 @@ func (r *reflectorStore) Replace(list []interface{}, _ string) error { Entity: entity, }) - if _, ok := seenBefore[podUID]; ok { - delete(seenBefore, podUID) + if _, ok := seenBefore[uid]; ok { + delete(seenBefore, uid) } - seenNow[podUID] = entity.EntityID + seenNow[uid] = entity.GetID() } for _, entityID := range seenBefore { @@ -142,6 +194,32 @@ func (r *reflectorStore) Replace(list []interface{}, _ string) error { return nil } +// Replace diffs the given list with the contents of the workloadmeta store +// (through r.seen), and updates and deletes the necessary objects. +func (r *podReflectorStore) Replace(list []interface{}, _ string) error { + entities := make([]entityUid, 0, len(list)) + + for _, obj := range list { + pod := obj.(*corev1.Pod) + entities = append(entities, entityUid{parsePod(pod, r.options), pod.UID}) + } + + return r.replace(entities) +} + +// Replace diffs the given list with the contents of the workloadmeta store +// (through r.seen), and updates and deletes the necessary objects. 
+func (r *nodeReflectorStore) Replace(list []interface{}, _ string) error { + entities := make([]entityUid, 0, len(list)) + + for _, obj := range list { + node := obj.(*corev1.Node) + entities = append(entities, entityUid{parseNode(node), node.UID}) + } + + return r.replace(entities) +} + // List is not implemented func (r *reflectorStore) List() []interface{} { panic("not implemented") @@ -240,6 +318,20 @@ func parsePod(pod *corev1.Pod, options *parseOptions) *workloadmeta.KubernetesPo } } +func parseNode(node *corev1.Node) *workloadmeta.KubernetesNode { + return &workloadmeta.KubernetesNode{ + EntityID: workloadmeta.EntityID{ + Kind: workloadmeta.KindKubernetesNode, + ID: node.Name, + }, + EntityMeta: workloadmeta.EntityMeta{ + Name: node.Name, + Annotations: node.Annotations, + Labels: node.Labels, + }, + } +} + func filterMapStringKey(mapInput map[string]string, keyFilters []*regexp.Regexp) map[string]string { for key := range mapInput { for _, filter := range keyFilters { diff --git a/pkg/workloadmeta/collectors/internal/remoteworkloadmeta/remoteworkloadmeta.go b/pkg/workloadmeta/collectors/internal/remoteworkloadmeta/remoteworkloadmeta.go index 064ee7e3afde1..05e3f7033686c 100644 --- a/pkg/workloadmeta/collectors/internal/remoteworkloadmeta/remoteworkloadmeta.go +++ b/pkg/workloadmeta/collectors/internal/remoteworkloadmeta/remoteworkloadmeta.go @@ -22,9 +22,9 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/config" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" - "github.com/DataDog/datadog-agent/pkg/proto/utils" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/DataDog/datadog-agent/pkg/util/log" + protoutils "github.com/DataDog/datadog-agent/pkg/util/proto" "github.com/DataDog/datadog-agent/pkg/workloadmeta" "github.com/DataDog/datadog-agent/pkg/workloadmeta/telemetry" ) @@ -201,7 +201,7 @@ func (c *collector) processResponse(response *pb.WorkloadmetaStreamResponse) err var 
collectorEvents []workloadmeta.CollectorEvent for _, protoEvent := range response.Events { - workloadmetaEvent, err := utils.WorkloadmetaEventFromProtoEvent(protoEvent) + workloadmetaEvent, err := protoutils.WorkloadmetaEventFromProtoEvent(protoEvent) if err != nil { return err } diff --git a/pkg/workloadmeta/dump.go b/pkg/workloadmeta/dump.go index d3b22fec8f78e..f69745eaed170 100644 --- a/pkg/workloadmeta/dump.go +++ b/pkg/workloadmeta/dump.go @@ -53,6 +53,8 @@ func (s *store) Dump(verbose bool) WorkloadDumpResponse { info = e.String(verbose) case *KubernetesPod: info = e.String(verbose) + case *KubernetesNode: + info = e.String(verbose) case *ECSTask: info = e.String(verbose) case *ContainerImageMetadata: diff --git a/pkg/workloadmeta/server/server.go b/pkg/workloadmeta/server/server.go index e9fed83cf2fed..1ee6dc7937085 100644 --- a/pkg/workloadmeta/server/server.go +++ b/pkg/workloadmeta/server/server.go @@ -9,9 +9,9 @@ import ( "time" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" - "github.com/DataDog/datadog-agent/pkg/proto/utils" "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/DataDog/datadog-agent/pkg/util/log" + protoutils "github.com/DataDog/datadog-agent/pkg/util/proto" "github.com/DataDog/datadog-agent/pkg/workloadmeta" "github.com/DataDog/datadog-agent/pkg/workloadmeta/telemetry" ) @@ -33,7 +33,7 @@ type Server struct { // StreamEntities streams entities from the workloadmeta store applying the given filter func (s *Server) StreamEntities(in *pb.WorkloadmetaStreamRequest, out pb.AgentSecure_WorkloadmetaStreamEntitiesServer) error { - filter, err := utils.WorkloadmetaFilterFromProtoFilter(in.GetFilter()) + filter, err := protoutils.WorkloadmetaFilterFromProtoFilter(in.GetFilter()) if err != nil { return err } @@ -54,7 +54,7 @@ func (s *Server) StreamEntities(in *pb.WorkloadmetaStreamRequest, out pb.AgentSe var protobufEvents []*pb.WorkloadmetaEvent for _, event := range eventBundle.Events { - protobufEvent, err := 
utils.ProtobufEventFromWorkloadmetaEvent(event) + protobufEvent, err := protoutils.ProtobufEventFromWorkloadmetaEvent(event) if err != nil { log.Errorf("error converting workloadmeta event to protobuf: %s", err) diff --git a/pkg/workloadmeta/store.go b/pkg/workloadmeta/store.go index e7809f2dbf5bb..0606bc720fd1d 100644 --- a/pkg/workloadmeta/store.go +++ b/pkg/workloadmeta/store.go @@ -294,6 +294,16 @@ func (s *store) GetKubernetesPodForContainer(containerID string) (*KubernetesPod return nil, errors.NewNotFound(containerID) } +// GetKubernetesNode implements Store#GetKubernetesNode +func (s *store) GetKubernetesNode(id string) (*KubernetesNode, error) { + entity, err := s.getEntityByKind(KindKubernetesNode, id) + if err != nil { + return nil, err + } + + return entity.(*KubernetesNode), nil +} + // GetECSTask implements Store#GetECSTask func (s *store) GetECSTask(id string) (*ECSTask, error) { entity, err := s.getEntityByKind(KindECSTask, id) diff --git a/pkg/workloadmeta/testing/store.go b/pkg/workloadmeta/testing/store.go index ea520fad73a74..613b9df4b23f7 100644 --- a/pkg/workloadmeta/testing/store.go +++ b/pkg/workloadmeta/testing/store.go @@ -94,6 +94,16 @@ func (s *Store) GetKubernetesPodForContainer(containerID string) (*workloadmeta. return nil, errors.NewNotFound(containerID) } +// GetKubernetesNode returns metadata about a Kubernetes node. +func (s *Store) GetKubernetesNode(id string) (*workloadmeta.KubernetesNode, error) { + entity, err := s.getEntityByKind(workloadmeta.KindKubernetesNode, id) + if err != nil { + return nil, err + } + + return entity.(*workloadmeta.KubernetesNode), nil +} + // GetECSTask returns metadata about an ECS task. 
func (s *Store) GetECSTask(id string) (*workloadmeta.ECSTask, error) { entity, err := s.getEntityByKind(workloadmeta.KindECSTask, id) diff --git a/pkg/workloadmeta/types.go b/pkg/workloadmeta/types.go index d13bda193c3de..9afe1953922eb 100644 --- a/pkg/workloadmeta/types.go +++ b/pkg/workloadmeta/types.go @@ -72,6 +72,10 @@ type Store interface { // for one containing the given container. GetKubernetesPodForContainer(containerID string) (*KubernetesPod, error) + // GetKubernetesNode returns metadata about a Kubernetes node. It fetches + // the entity with kind KindKubernetesNode and the given ID. + GetKubernetesNode(id string) (*KubernetesNode, error) + // GetECSTask returns metadata about an ECS task. It fetches the entity with // kind KindECSTask and the given ID. GetECSTask(id string) (*ECSTask, error) @@ -108,6 +112,7 @@ type Kind string const ( KindContainer Kind = "container" KindKubernetesPod Kind = "kubernetes_pod" + KindKubernetesNode Kind = "kubernetes_node" KindECSTask Kind = "ecs_task" KindContainerImageMetadata Kind = "container_image_metadata" ) @@ -624,6 +629,47 @@ func (o KubernetesPodOwner) String(verbose bool) string { return sb.String() } +// KubernetesNode is an Entity representing a Kubernetes Node. +type KubernetesNode struct { + EntityID + EntityMeta +} + +// GetID implements Entity#GetID. +func (n *KubernetesNode) GetID() EntityID { + return n.EntityID +} + +// Merge implements Entity#Merge. +func (n *KubernetesNode) Merge(e Entity) error { + nn, ok := e.(*KubernetesNode) + if !ok { + return fmt.Errorf("cannot merge KubernetesNode with different kind %T", e) + } + + return merge(n, nn) +} + +// DeepCopy implements Entity#DeepCopy. 
+func (n KubernetesNode) DeepCopy() Entity { + cn := deepcopy.Copy(n).(KubernetesNode) + return &cn +} + +// String implements Entity#String +func (n KubernetesNode) String(verbose bool) string { + var sb strings.Builder + _, _ = fmt.Fprintln(&sb, "----------- Entity ID -----------") + _, _ = fmt.Fprintln(&sb, n.EntityID.String(verbose)) + + _, _ = fmt.Fprintln(&sb, "----------- Entity Meta -----------") + _, _ = fmt.Fprint(&sb, n.EntityMeta.String(verbose)) + + return sb.String() +} + +var _ Entity = &KubernetesNode{} + // ECSTask is an Entity representing an ECS Task. type ECSTask struct { EntityID diff --git a/release.json b/release.json index b977746731ea4..28983fc66f3f7 100644 --- a/release.json +++ b/release.json @@ -1,15 +1,15 @@ { "base_branch": "main", "last_stable": { - "6": "6.43.0", - "7": "7.43.0" + "6": "6.44.1", + "7": "7.44.1" }, "nightly": { "INTEGRATIONS_CORE_VERSION": "master", "OMNIBUS_SOFTWARE_VERSION": "master", "OMNIBUS_RUBY_VERSION": "datadog-5.5.0", - "JMXFETCH_VERSION": "0.47.8", - "JMXFETCH_HASH": "aff48a7589b507fd41367686f15307005d498d94a260efbc771963cedd52f16f", + "JMXFETCH_VERSION": "0.47.9", + "JMXFETCH_HASH": "fb5c3fc2fb42db1ee5c3a7c6187b316d79d69c50b967db0b7ffde590622d0395", "MACOS_BUILD_VERSION": "master", "WINDOWS_DDNPM_DRIVER": "release-signed", "WINDOWS_DDNPM_VERSION": "2.4.1", @@ -20,8 +20,8 @@ "INTEGRATIONS_CORE_VERSION": "master", "OMNIBUS_SOFTWARE_VERSION": "master", "OMNIBUS_RUBY_VERSION": "datadog-5.5.0", - "JMXFETCH_VERSION": "0.47.8", - "JMXFETCH_HASH": "aff48a7589b507fd41367686f15307005d498d94a260efbc771963cedd52f16f", + "JMXFETCH_VERSION": "0.47.9", + "JMXFETCH_HASH": "fb5c3fc2fb42db1ee5c3a7c6187b316d79d69c50b967db0b7ffde590622d0395", "MACOS_BUILD_VERSION": "master", "WINDOWS_DDNPM_DRIVER": "release-signed", "WINDOWS_DDNPM_VERSION": "2.4.1", @@ -29,25 +29,25 @@ "SECURITY_AGENT_POLICIES_VERSION": "master" }, "release-a6": { - "INTEGRATIONS_CORE_VERSION": "7.45.0-rc.3", - "OMNIBUS_SOFTWARE_VERSION": "7.45.0-rc.1", - 
"OMNIBUS_RUBY_VERSION": "7.45.0-rc.1", - "JMXFETCH_VERSION": "0.47.8", - "JMXFETCH_HASH": "aff48a7589b507fd41367686f15307005d498d94a260efbc771963cedd52f16f", - "SECURITY_AGENT_POLICIES_VERSION": "v0.45.1", - "MACOS_BUILD_VERSION": "6.45.0-rc.1", + "INTEGRATIONS_CORE_VERSION": "7.46.0-rc.2", + "OMNIBUS_SOFTWARE_VERSION": "7.46.0-rc.2", + "OMNIBUS_RUBY_VERSION": "7.46.0-rc.1", + "JMXFETCH_VERSION": "0.47.9", + "JMXFETCH_HASH": "fb5c3fc2fb42db1ee5c3a7c6187b316d79d69c50b967db0b7ffde590622d0395", + "SECURITY_AGENT_POLICIES_VERSION": "v0.46.0", + "MACOS_BUILD_VERSION": "6.46.0-rc.1", "WINDOWS_DDNPM_DRIVER": "release-signed", "WINDOWS_DDNPM_VERSION": "2.4.1", "WINDOWS_DDNPM_SHASUM": "f12af44306eac3ea15828fd12c24d44ae519692a94a0f1f5d4fa868c3e596b07" }, "release-a7": { - "INTEGRATIONS_CORE_VERSION": "7.45.0-rc.3", - "OMNIBUS_SOFTWARE_VERSION": "7.45.0-rc.1", - "OMNIBUS_RUBY_VERSION": "7.45.0-rc.1", - "JMXFETCH_VERSION": "0.47.8", - "JMXFETCH_HASH": "aff48a7589b507fd41367686f15307005d498d94a260efbc771963cedd52f16f", - "SECURITY_AGENT_POLICIES_VERSION": "v0.45.1", - "MACOS_BUILD_VERSION": "7.45.0-rc.1", + "INTEGRATIONS_CORE_VERSION": "7.46.0-rc.2", + "OMNIBUS_SOFTWARE_VERSION": "7.46.0-rc.2", + "OMNIBUS_RUBY_VERSION": "7.46.0-rc.1", + "JMXFETCH_VERSION": "0.47.9", + "JMXFETCH_HASH": "fb5c3fc2fb42db1ee5c3a7c6187b316d79d69c50b967db0b7ffde590622d0395", + "SECURITY_AGENT_POLICIES_VERSION": "v0.46.0", + "MACOS_BUILD_VERSION": "7.46.0-rc.1", "WINDOWS_DDNPM_DRIVER": "release-signed", "WINDOWS_DDNPM_VERSION": "2.4.1", "WINDOWS_DDNPM_SHASUM": "f12af44306eac3ea15828fd12c24d44ae519692a94a0f1f5d4fa868c3e596b07" diff --git a/releasenotes-dca/notes/collect-resource-conditions-84adbeaa73033b99.yaml b/releasenotes-dca/notes/collect-resource-conditions-84adbeaa73033b99.yaml new file mode 100644 index 0000000000000..7a315447fa7ad --- /dev/null +++ b/releasenotes-dca/notes/collect-resource-conditions-84adbeaa73033b99.yaml @@ -0,0 +1,10 @@ +# Each section from every release note are combined 
when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - Collect conditions for a variety of Kubernetes resources. diff --git a/releasenotes-dca/notes/collect-volume-source-65e370cf8f825843.yaml b/releasenotes-dca/notes/collect-volume-source-65e370cf8f825843.yaml new file mode 100644 index 0000000000000..99feb9523d9b9 --- /dev/null +++ b/releasenotes-dca/notes/collect-volume-source-65e370cf8f825843.yaml @@ -0,0 +1,13 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Collect persistent volume source in the orchestrator check. + + diff --git a/releasenotes-dca/notes/forwarder-idle-timeout-fix-46d67c8d9ca78b43.yaml b/releasenotes-dca/notes/forwarder-idle-timeout-fix-46d67c8d9ca78b43.yaml new file mode 100644 index 0000000000000..d09a16ef54f72 --- /dev/null +++ b/releasenotes-dca/notes/forwarder-idle-timeout-fix-46d67c8d9ca78b43.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG-DCA.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fix the timeout for idle HTTP connections. 
diff --git a/releasenotes-dca/notes/improve_dca_leader-8444de5fe59ef9d6.yaml b/releasenotes-dca/notes/improve_dca_leader-8444de5fe59ef9d6.yaml new file mode 100644 index 0000000000000..2eb3df3dca03f --- /dev/null +++ b/releasenotes-dca/notes/improve_dca_leader-8444de5fe59ef9d6.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG-DCA.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + When the cluster-agent is started with ``hostNetwork: true``, the leader election mechanism was using a node name instead of the pod name. This was breaking the “follower to leader” forwarding mechanism. + This change introduces the ``DD_POD_NAME`` environment variable as a more reliable way to set the cluster-agent pod name. It is supposed to be filled by the Kubernetes downward API. diff --git a/releasenotes-dca/notes/vpa-collection-d52dd3798a093c92.yaml b/releasenotes-dca/notes/vpa-collection-d52dd3798a093c92.yaml new file mode 100644 index 0000000000000..e31045f4e4a61 --- /dev/null +++ b/releasenotes-dca/notes/vpa-collection-d52dd3798a093c92.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Enable collection of Vertical Pod Autoscalers by default in the orchestrator check. 
diff --git a/releasenotes/notes/Add-support-for-new-DBM-metadata-feature-2115d5e9098ec593.yaml b/releasenotes/notes/Add-support-for-new-DBM-metadata-feature-2115d5e9098ec593.yaml new file mode 100644 index 0000000000000..4df4fa68b721d --- /dev/null +++ b/releasenotes/notes/Add-support-for-new-DBM-metadata-feature-2115d5e9098ec593.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + This change adds support for ingesting information such as database settings and schemas as database "metadata" diff --git a/releasenotes/notes/Deprecate-Windows-versions-lower-than-2012-and-8.1-bd407a6ae7659ffd.yaml b/releasenotes/notes/Deprecate-Windows-versions-lower-than-2012-and-8.1-bd407a6ae7659ffd.yaml new file mode 100644 index 0000000000000..5d48e7d1a50bf --- /dev/null +++ b/releasenotes/notes/Deprecate-Windows-versions-lower-than-2012-and-8.1-bd407a6ae7659ffd.yaml @@ -0,0 +1,4 @@ +--- +deprecations: + - | + Installing the Agent on Windows Server versions lower than 2012 and client versions lower than 8.1 is now deprecated. diff --git a/releasenotes/notes/add-compliance-kubernetes-config-logs-55a247d576286850.yaml b/releasenotes/notes/add-compliance-kubernetes-config-logs-55a247d576286850.yaml new file mode 100644 index 0000000000000..f8efc14184e24 --- /dev/null +++ b/releasenotes/notes/add-compliance-kubernetes-config-logs-55a247d576286850.yaml @@ -0,0 +1,4 @@ +features: + - | + Add the capability for the security-agent compliance module to export + detailed Kubernetes node configurations. 
diff --git a/releasenotes/notes/add-unsafe-disable-verification-a0e211142556d38a.yaml b/releasenotes/notes/add-unsafe-disable-verification-a0e211142556d38a.yaml new file mode 100644 index 0000000000000..32874a276166e --- /dev/null +++ b/releasenotes/notes/add-unsafe-disable-verification-a0e211142556d38a.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add `unsafe-disable-verification` flag to skip TUF/in-toto verification when downloading and installing wheels with the `integrations install` command diff --git a/releasenotes/notes/added-two-new-metrics-to-KSM-core-check-df655446dc43cded.yaml b/releasenotes/notes/added-two-new-metrics-to-KSM-core-check-df655446dc43cded.yaml new file mode 100644 index 0000000000000..3432a23a2fb29 --- /dev/null +++ b/releasenotes/notes/added-two-new-metrics-to-KSM-core-check-df655446dc43cded.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Add two new metrics to the Kubernetes State Core check: `kubernetes_state.configmap.count` and `kubernetes_state.secret.count`. + \ No newline at end of file diff --git a/releasenotes/notes/agent-sql-trace-e2d70c1e5f4dfa4b.yaml b/releasenotes/notes/agent-sql-trace-e2d70c1e5f4dfa4b.yaml new file mode 100644 index 0000000000000..5fbd821b9efb8 --- /dev/null +++ b/releasenotes/notes/agent-sql-trace-e2d70c1e5f4dfa4b.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. 
This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Adds the ability to generate an Oracle SQL trace for Agent queries diff --git a/releasenotes/notes/apm-disable-file-logging-3cd11cf32b25b36d.yaml b/releasenotes/notes/apm-disable-file-logging-3cd11cf32b25b36d.yaml new file mode 100644 index 0000000000000..9f097cea34d1a --- /dev/null +++ b/releasenotes/notes/apm-disable-file-logging-3cd11cf32b25b36d.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + APM: The `disable_file_logging` setting is now respected. diff --git a/releasenotes/notes/collect-resource-conditions-84adbeaa73033b99.yaml b/releasenotes/notes/collect-resource-conditions-84adbeaa73033b99.yaml new file mode 100644 index 0000000000000..7a315447fa7ad --- /dev/null +++ b/releasenotes/notes/collect-resource-conditions-84adbeaa73033b99.yaml @@ -0,0 +1,10 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - Collect conditions for a variety of Kubernetes resources. 
diff --git a/releasenotes/notes/container-working-set-4e921ffca460ca44.yaml b/releasenotes/notes/container-working-set-4e921ffca460ca44.yaml new file mode 100644 index 0000000000000..06730580a4867 --- /dev/null +++ b/releasenotes/notes/container-working-set-4e921ffca460ca44.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add `container.memory.working_set` metric on Linux (computed as Usage - InactiveFile) and Windows (mapped to Private Working Set) diff --git a/releasenotes/notes/dogstatsd-logger-ce0ed4f5624bb13c.yaml b/releasenotes/notes/dogstatsd-logger-ce0ed4f5624bb13c.yaml new file mode 100644 index 0000000000000..c823d5ea76e19 --- /dev/null +++ b/releasenotes/notes/dogstatsd-logger-ce0ed4f5624bb13c.yaml @@ -0,0 +1,12 @@ +--- +features: + - | + Enabling ``dogstatsd_metrics_stats_enable`` will now enable ``dogstatsd_logging_enabled``. When enabled, ``dogstatsd_logging_enabled`` generates dogstatsd log files at: + - For ``Windows``: ``c:\programdata\datadog\logs\dogstatsd_info\dogstatsd-stats.log`` + - For ``Linux``: ``/var/log/datadog/dogstatsd_info/dogstatsd-stats.log`` + - For ``MacOS``: ``/opt/datadog-agent/logs/dogstatsd_info/dogstatsd-stats.log`` + These log files are also automatically attached to the flare. 
+ - | + You can adjust the dogstatsd-stats logging configuration by using: + - dogstatsd_log_file_max_size: ``SizeInBytes`` (default: ``dogstatsd_log_file_max_size:"10Mb"``) + - dogstatsd_log_file_max_rolls: ``Int`` (default: ``dogstatsd_log_file_max_rolls:3``) \ No newline at end of file diff --git a/releasenotes/notes/enable-http-monitoring-config-value-21ae9f834e3e2c67.yaml b/releasenotes/notes/enable-http-monitoring-config-value-21ae9f834e3e2c67.yaml new file mode 100644 index 0000000000000..a6c33242ebbb4 --- /dev/null +++ b/releasenotes/notes/enable-http-monitoring-config-value-21ae9f834e3e2c67.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + The `network_config.enable_http_monitoring` configuration has changed to `service_monitoring_config.enable_http_monitoring`. + +deprecations: + - The `network_config.enable_http_monitoring` configuration is now deprecated. Use `service_monitoring_config.enable_http_monitoring` instead. diff --git a/releasenotes/notes/fix-auto-multi-line-integration-config-35449268a4471c19.yaml b/releasenotes/notes/fix-auto-multi-line-integration-config-35449268a4471c19.yaml new file mode 100644 index 0000000000000..1269b850afabb --- /dev/null +++ b/releasenotes/notes/fix-auto-multi-line-integration-config-35449268a4471c19.yaml @@ -0,0 +1,13 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fix an issue where ``auto_multi_line_detection``, ``auto_multi_line_sample_size``, + and ``auto_multi_line_match_threshold`` were not working when set though a pod + annotation or container label. 
\ No newline at end of file diff --git a/releasenotes/notes/fix-ksm-core-unknown-resources-4f47a0e086687265.yaml b/releasenotes/notes/fix-ksm-core-unknown-resources-4f47a0e086687265.yaml new file mode 100644 index 0000000000000..d49276a912299 --- /dev/null +++ b/releasenotes/notes/fix-ksm-core-unknown-resources-4f47a0e086687265.yaml @@ -0,0 +1,3 @@ +fixes: + - | + Fixes `kubernetes_state_core` crash when unknown resources are provided. diff --git a/releasenotes/notes/fix-pcf-file-descriptor-leak-db471bd34281f92d.yaml b/releasenotes/notes/fix-pcf-file-descriptor-leak-db471bd34281f92d.yaml new file mode 100644 index 0000000000000..1654d803fa9d2 --- /dev/null +++ b/releasenotes/notes/fix-pcf-file-descriptor-leak-db471bd34281f92d.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fix a file descriptors leak in the Cloud Foundry Cluster Agent. diff --git a/releasenotes/notes/forwarder-idle-timeout-fix-d7792df994021cfe.yaml b/releasenotes/notes/forwarder-idle-timeout-fix-d7792df994021cfe.yaml new file mode 100644 index 0000000000000..3cf55e4c466ae --- /dev/null +++ b/releasenotes/notes/forwarder-idle-timeout-fix-d7792df994021cfe.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fix the timeout for idle HTTP connections. 
diff --git a/releasenotes/notes/inject_task_arn_in_instrumentation_telemetry_payload-25fa30e63a87fc97.yaml b/releasenotes/notes/inject_task_arn_in_instrumentation_telemetry_payload-25fa30e63a87fc97.yaml new file mode 100644 index 0000000000000..9e9c1ed05b90a --- /dev/null +++ b/releasenotes/notes/inject_task_arn_in_instrumentation_telemetry_payload-25fa30e63a87fc97.yaml @@ -0,0 +1,13 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Inject container tags in instrumentation telemetry payloads + - | + Extract the `task_arn` tag from container tags and add it as its own header. diff --git a/releasenotes/notes/netflow_add_flush_time-81a1e9bf5067da1f.yaml b/releasenotes/notes/netflow_add_flush_time-81a1e9bf5067da1f.yaml new file mode 100644 index 0000000000000..461fb6310e56a --- /dev/null +++ b/releasenotes/notes/netflow_add_flush_time-81a1e9bf5067da1f.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + [pkg/netflow] Add ``flush_timestamp`` to payload. 
diff --git a/releasenotes/notes/netflow_add_sequence_metrics-9071b2a38795cf01.yaml b/releasenotes/notes/netflow_add_sequence_metrics-9071b2a38795cf01.yaml new file mode 100644 index 0000000000000..640a13f15da2a --- /dev/null +++ b/releasenotes/notes/netflow_add_sequence_metrics-9071b2a38795cf01.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + [pkg/netflow] Add sequence metrics. diff --git a/releasenotes/notes/oracle-execution-plans-67dd85f2d5a19436.yaml b/releasenotes/notes/oracle-execution-plans-67dd85f2d5a19436.yaml new file mode 100644 index 0000000000000..58f9682441f1c --- /dev/null +++ b/releasenotes/notes/oracle-execution-plans-67dd85f2d5a19436.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +features: + - | + Add Oracle execution plans diff --git a/releasenotes/notes/otel-mapping-go-0.2.3-88c535b88e857fa3.yaml b/releasenotes/notes/otel-mapping-go-0.2.3-88c535b88e857fa3.yaml new file mode 100644 index 0000000000000..007cd14873294 --- /dev/null +++ b/releasenotes/notes/otel-mapping-go-0.2.3-88c535b88e857fa3.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. 
This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Remove thread count from OTel .NET runtime metric mappings. diff --git a/releasenotes/notes/otlp-runtime-metrics-telemetry-metric-865a9cb986f9d25a.yaml b/releasenotes/notes/otlp-runtime-metrics-telemetry-metric-865a9cb986f9d25a.yaml new file mode 100644 index 0000000000000..b9d90f13b2b38 --- /dev/null +++ b/releasenotes/notes/otlp-runtime-metrics-telemetry-metric-865a9cb986f9d25a.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Adds new metric `datadog.agent.otlp.runtime_metrics` when runtime metrics are being received via OTLP. diff --git a/releasenotes/notes/refactor-sbom-parameters-4af241e77fc00f83.yaml b/releasenotes/notes/refactor-sbom-parameters-4af241e77fc00f83.yaml new file mode 100644 index 0000000000000..5c8aa6355b472 --- /dev/null +++ b/releasenotes/notes/refactor-sbom-parameters-4af241e77fc00f83.yaml @@ -0,0 +1,116 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. 
+--- +upgrade: + - | + Refactor the SBOM collection parameters from:: + + conf.d/container_lifecycle.d/conf.yaml existence (A) # to schedule the container lifecycle long running check + conf.d/container_image.d/conf.yaml existence (B) # to schedule the container image metadata long running check + conf.d/sbom.d/conf.yaml existence (C) # to schedule the SBOM long running check + + Inside datadog.yaml: + + container_lifecycle: + enabled: (D) # Used to control the start of the container_lifecycle forwarder but has been decommissioned by #16084 (7.45.0-rc) + dd_url: # \ + additional_endpoints: # | + use_compression: # | + compression_level: # > generic parameters for the generic EVP pipeline + … # | + use_v2_api: # / + + container_image: + enabled: (E) # Used to control the start of the container_image forwarder but has been decommissioned by #16084 (7.45.0-rc) + dd_url: # \ + additional_endpoints: # | + use_compression: # | + compression_level: # > generic parameters for the generic EVP pipeline + … # | + use_v2_api: # / + + sbom: + enabled: (F) # control host SBOM collection and do **not** control container-related SBOM since #16084 (7.45.0-rc) + dd_url: # \ + additional_endpoints: # | + use_compression: # | + compression_level: # > generic parameters for the generic EVP pipeline + … # | + use_v2_api: # / + analyzers: (G) # trivy analyzers user for host SBOM collection + cache_directory: (H) + clear_cache_on_exit: (I) + use_custom_cache: (J) + custom_cache_max_disk_size: (K) + custom_cache_max_cache_entries: (L) + cache_clean_interval: (M) + + container_image_collection: + metadata: + enabled: (N) # Controls the collection of the container image metadata in workload meta + sbom: + enabled: (O) + use_mount: (P) + scan_interval: (Q) + scan_timeout: (R) + analyzers: (S) # trivy analyzers user for containers SBOM collection + check_disk_usage: (T) + min_available_disk: (U) + + to:: + + conf.d/{container_lifecycle,container_image,sbom}.d/conf.yaml no longer needs to be 
created. A default version is always shipped with the Agent Docker image with an underscore-prefixed ad_identifier that will be synthesized by the agent at runtime based on config {container_lifecycle,container_image,sbom}.enabled parameters. + + Inside datadog.yaml: + + container_lifecycle: + enabled: (A) # Replaces the need for creating a conf.d/container_lifecycle.d/conf.yaml file + dd_url: # \ + additional_endpoints: # | + use_compression: # | + compression_level: # > unchanged generic parameters for the generic EVP pipeline + … # | + use_v2_api: # / + + container_image: + enabled: (B) # Replaces the need for creating a conf.d/container_image.d/conf.yaml file + dd_url: # \ + additional_endpoints: # | + use_compression: # | + compression_level: # > unchanged generic parameters for the generic EVP pipeline + … # | + use_v2_api: # / + + sbom: + enabled: (C) # Replaces the need for creating a conf.d/sbom.d/conf.yaml file + dd_url: # \ + additional_endpoints: # | + use_compression: # | + compression_level: # > unchanged generic parameters for the generic EVP pipeline + … # | + use_v2_api: # / + cache_directory: (H) + clear_cache_on_exit: (I) + cache: # Factorize all settings related to the custom cache + enabled: (J) + max_disk_size: (K) + max_cache_entries: (L) + clean_interval: (M) + + host: # for host SBOM parameters that were directly below `sbom` before. 
+ + enabled: (F) # sbom.host.enabled replaces sbom.enabled + analyzers: (G) # sbom.host.analyzers replaces sbom.analyzers + + container_image: # sbom.container_image replaces container_image_collection.sbom + enabled: (O) + use_mount: (P) + scan_interval: (Q) + scan_timeout: (R) + analyzers: (S) # trivy analyzers used for containers SBOM collection + check_disk_usage: (T) + min_available_disk: (U) diff --git a/releasenotes/notes/upgrade-jmxfetch-0-47-9-46ffbe691f9e5eca.yaml b/releasenotes/notes/upgrade-jmxfetch-0-47-9-46ffbe691f9e5eca.yaml new file mode 100644 index 0000000000000..caf359091ca57 --- /dev/null +++ b/releasenotes/notes/upgrade-jmxfetch-0-47-9-46ffbe691f9e5eca.yaml @@ -0,0 +1,13 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Upgraded JMXFetch to ``0.47.9`` which has fixes to improve + efficiency when fetching beans, fixes for process attachment + in some JDK versions, and fixes a thread leak.
diff --git a/tasks/__init__.py b/tasks/__init__.py index c203f9cee3384..d9a47c524c6dc 100644 --- a/tasks/__init__.py +++ b/tasks/__init__.py @@ -12,7 +12,7 @@ cluster_agent_cloudfoundry, components, customaction, - docker, + docker_tasks, dogstatsd, epforwarder, github, @@ -114,7 +114,7 @@ ns.add_collection(customaction) ns.add_collection(bench) ns.add_collection(trace_agent) -ns.add_collection(docker) +ns.add_collection(docker_tasks, "docker") ns.add_collection(dogstatsd) ns.add_collection(epforwarder) ns.add_collection(msi) diff --git a/tasks/agent.py b/tasks/agent.py index dd18b4ebd2bea..b78f42b538e44 100644 --- a/tasks/agent.py +++ b/tasks/agent.py @@ -18,7 +18,7 @@ from invoke.exceptions import Exit, ParseError from .build_tags import filter_incompatible_tags, get_build_tags, get_default_build_tags -from .docker import pull_base_images +from .docker_tasks import pull_base_images from .flavor import AgentFlavor from .go import deps from .rtloader import clean as rtloader_clean @@ -45,6 +45,8 @@ AGENT_CORECHECKS = [ "container", "containerd", + "container_image", + "container_lifecycle", "cpu", "cri", "snmp", @@ -59,12 +61,14 @@ "ntp", "oom_kill", "oracle-dbm", + "sbom", "systemd", "tcp_queue_length", "uptime", "winkmem", "winproc", "jetson", + "openmetrics", ] IOT_AGENT_CORECHECKS = [ diff --git a/tasks/build_tags.py b/tasks/build_tags.py index 6b14a039ad198..992bce6f21c52 100644 --- a/tasks/build_tags.py +++ b/tasks/build_tags.py @@ -114,10 +114,10 @@ ) # SECURITY_AGENT_TAGS lists the tags necessary to build the security agent -SECURITY_AGENT_TAGS = {"netcgo", "secrets", "docker", "containerd", "kubeapiserver", "kubelet", "podman", "zlib"} +SECURITY_AGENT_TAGS = {"netcgo", "secrets", "docker", "containerd", "kubeapiserver", "kubelet", "podman", "zlib", "ec2"} # SYSTEM_PROBE_TAGS lists the tags necessary to build system-probe -SYSTEM_PROBE_TAGS = AGENT_TAGS.union({"clusterchecks", "linux_bpf", "npm"}).difference({"python", "trivy"}) +SYSTEM_PROBE_TAGS = 
AGENT_TAGS.union({"clusterchecks", "linux_bpf", "npm"}).difference({"python", "trivy", "systemd"}) # TRACE_AGENT_TAGS lists the tags that have to be added when the trace-agent TRACE_AGENT_TAGS = {"docker", "containerd", "kubeapiserver", "kubelet", "otlp", "netcgo", "podman", "secrets"} diff --git a/tasks/docker.py b/tasks/docker_tasks.py similarity index 100% rename from tasks/docker.py rename to tasks/docker_tasks.py diff --git a/tasks/github.py b/tasks/github.py index 83436d587ed63..00dec5db9df08 100644 --- a/tasks/github.py +++ b/tasks/github.py @@ -2,7 +2,12 @@ from invoke import Exit, task -from .libs.github_actions_tools import download_artifacts, follow_workflow_run, trigger_macos_workflow +from .libs.github_actions_tools import ( + download_artifacts, + follow_workflow_run, + print_workflow_conclusion, + trigger_macos_workflow, +) from .utils import DEFAULT_BRANCH, load_release_versions @@ -33,10 +38,15 @@ def trigger_macos_build( version_cache_file_content=version_cache, ) - follow_workflow_run(run_id) + workflow_conclusion = follow_workflow_run(run_id) + + print_workflow_conclusion(workflow_conclusion) download_artifacts(run_id, destination) + if workflow_conclusion != "success": + raise Exit(code=1) + @task def trigger_macos_test( @@ -56,10 +66,15 @@ def trigger_macos_test( python_runtimes=python_runtimes, ) - follow_workflow_run(run_id) + workflow_conclusion = follow_workflow_run(run_id) + + print_workflow_conclusion(workflow_conclusion) download_artifacts(run_id, destination) + if workflow_conclusion != "success": + raise Exit(code=1) + @task def lint_codeowner(_): diff --git a/tasks/libs/copyright.py b/tasks/libs/copyright.py index c967128a12212..8150229c285c3 100755 --- a/tasks/libs/copyright.py +++ b/tasks/libs/copyright.py @@ -47,6 +47,7 @@ # line of the file HEADER_EXCLUSION_REGEX = [ '^// Code generated ', + '^// protoc ', '^//go:generate ', '^// AUTOGENERATED FILE: ', '^// Copyright.* OpenTelemetry Authors', diff --git 
a/tasks/libs/github_actions_tools.py b/tasks/libs/github_actions_tools.py index 20b69f2934692..d6e1f86cd9e17 100644 --- a/tasks/libs/github_actions_tools.py +++ b/tasks/libs/github_actions_tools.py @@ -101,7 +101,7 @@ def get_macos_workflow_run_for_ref(workflow="macos.yaml", github_action_ref="mas def follow_workflow_run(run_id): """ - Follow the workflow run until completion. + Follow the workflow run until completion and return its conclusion. """ try: @@ -136,12 +136,7 @@ def follow_workflow_run(run_id): conclusion = run["conclusion"] if status == "completed": - if conclusion == "success": - print(color_message("Workflow run succeeded", "green")) - return - else: - print(color_message(f"Workflow run ended with state: {conclusion}", "red")) - raise Exit(code=1) + return conclusion else: print(f"Workflow still running... ({minutes}m)") # For some unknown reason, in Gitlab these lines do not get flushed, leading to not being @@ -152,6 +147,16 @@ def follow_workflow_run(run_id): sleep(60) +def print_workflow_conclusion(conclusion): + """ + Print the workflow conclusion + """ + if conclusion == "success": + print(color_message("Workflow run succeeded", "green")) + else: + print(color_message(f"Workflow run ended with state: {conclusion}", "red")) + + def download_artifacts(run_id, destination="."): """ Download all artifacts for a given job in the specified location. 
diff --git a/tasks/libs/pipeline_notifications.py b/tasks/libs/pipeline_notifications.py index 5e43389040a59..4c8a0f395eb55 100644 --- a/tasks/libs/pipeline_notifications.py +++ b/tasks/libs/pipeline_notifications.py @@ -34,6 +34,9 @@ "@datadog/software-integrity-and-trust": "#sit", "@datadog/single-machine-performance": "#single-machine-performance", "@datadog/agent-integrations": "#agent-integrations", + "@datadog/debugger": "#debugger-ops-prod", + "@datadog/database-monitoring": "#database-monitoring", + "@datadog/agent-cspm": "#k9-cspm-ops", } diff --git a/tasks/licenses.py b/tasks/licenses.py index d2ec53982068d..f25297bf51aa8 100644 --- a/tasks/licenses.py +++ b/tasks/licenses.py @@ -61,9 +61,9 @@ 'gopkg.in/Knetic/govaluate.v3', ] + # FIXME: This doesn't include licenses for non-go dependencies, like the javascript libs we use for the web gui def get_licenses_list(ctx): - # we need the full vendor tree in order to perform this analysis from .go import deps_vendored @@ -77,11 +77,34 @@ def get_licenses_list(ctx): shutil.rmtree("vendor/") +def is_valid_quote(copyright): + stack = [] + quotes_to_check = ["'", '"'] + for c in copyright: + if c in quotes_to_check: + if stack and stack[-1] == c: + stack.pop() + else: + stack.append(c) + return len(stack) == 0 + + def licenses_csv(licenses): licenses.sort(key=lambda lic: lic["package"]) def fmt_copyright(lic): - copyright = ' | '.join(sorted(lic['copyright'])) + # discards copyright with invalid quotes to ensure generated csv is valid + filtered_copyright = [] + for copyright in lic["copyright"]: + if is_valid_quote(copyright): + filtered_copyright.append(copyright) + else: + print( + f'copyright {copyright} was discarded because it contains invalid quotes. 
If you want to fix this discarded Copyright, you can modify the .copyright-overrides.yml file to fix the bad-quotes copyright' + ) + if len(filtered_copyright) == 0: + filtered_copyright = ["UNKNOWN"] + copyright = ' | '.join(sorted(filtered_copyright)) # quote for inclusion in CSV, if necessary if ',' in copyright: copyright = copyright.replace('"', '""') diff --git a/tasks/modules.py b/tasks/modules.py index 9b267fc21196e..24599cde8f31f 100644 --- a/tasks/modules.py +++ b/tasks/modules.py @@ -130,10 +130,16 @@ def dependency_path(self, agent_version): "test/e2e/containers/otlp_sender": GoModule( "test/e2e/containers/otlp_sender", condition=lambda: False, should_tag=False ), - "test/new-e2e": GoModule("test/new-e2e", condition=lambda: False, should_tag=False), + "test/new-e2e": GoModule( + "test/new-e2e", + independent=True, + should_tag=False, + targets=["./runner", "./utils/e2e/client"], + ), "test/fakeintake": GoModule("test/fakeintake", independent=True, should_tag=False), "pkg/obfuscate": GoModule("pkg/obfuscate", independent=True), "pkg/gohai": GoModule("pkg/gohai", independent=True, importable=False), + "pkg/proto": GoModule("pkg/proto", independent=True), "pkg/trace": GoModule("pkg/trace", independent=True), "pkg/security/secl": GoModule("pkg/security/secl", independent=True), "pkg/remoteconfig/state": GoModule("pkg/remoteconfig/state", independent=True), diff --git a/tasks/msi.py b/tasks/msi.py index 3006e65c18bbf..3c58041e0b485 100644 --- a/tasks/msi.py +++ b/tasks/msi.py @@ -12,7 +12,7 @@ from invoke.exceptions import Exit, UnexpectedExit from tasks.ssm import get_pfx_pass, get_signing_cert -from tasks.utils import get_version +from tasks.utils import get_version, load_release_versions # constants OUTPUT_PATH = os.path.join(os.getcwd(), "omnibus", "pkg") @@ -43,8 +43,8 @@ def _get_vs_build_command(cmd, vstudio_root=None): return cmd -def _get_env(ctx, major_version='7', python_runtimes='3'): - env = {} +def _get_env(ctx, major_version='7', python_runtimes='3',
release_version='nightly'): + env = load_release_versions(ctx, release_version) env['PACKAGE_VERSION'] = get_version( ctx, include_git=True, url_safe=True, major_version=major_version, include_pipeline_id=True @@ -62,7 +62,16 @@ def _get_env(ctx, major_version='7', python_runtimes='3', release_version='night return env -def _build(ctx, project='', vstudio_root=None, arch="x64", major_version='7', python_runtimes='3', debug=False): +def _build( + ctx, + project='', + vstudio_root=None, + arch="x64", + major_version='7', + python_runtimes='3', + release_version='nightly', + debug=False, +): """ Build the MSI installer builder, i.e. the program that can build an MSI """ @@ -70,7 +79,7 @@ def _build(ctx, project='', vstudio_root=None, arch="x64", major_version='7', py print("Building the MSI installer is only for available on Windows") raise Exit(code=1) - env = _get_env(ctx, major_version, python_runtimes) + env = _get_env(ctx, major_version, python_runtimes, release_version) print(f"arch is {arch}") cmd = "" @@ -113,7 +122,9 @@ def _build(ctx, project='', vstudio_root=None, arch="x64", major_version='7', py @task -def build(ctx, vstudio_root=None, arch="x64", major_version='7', python_runtimes='3', debug=False): +def build( + ctx, vstudio_root=None, arch="x64", major_version='7', python_runtimes='3', release_version='nightly', debug=False +): """ Build the MSI installer for the agent """ @@ -124,12 +135,13 @@ def build(ctx, vstudio_root=None, arch="x64", major_version='7', python_runtimes arch=arch, major_version=major_version, python_runtimes=python_runtimes, + release_version=release_version, debug=debug, ) configuration = "Release" if debug: configuration = "Debug" - env = _get_env(ctx, major_version, python_runtimes) + env = _get_env(ctx, major_version, python_runtimes, release_version) # Run the builder to produce the MSI succeeded = ctx.run( f'cd {BUILD_SOURCE_DIR}\\WixSetup && {BUILD_OUTPUT_DIR}\\bin\\{arch}\\{configuration}\\WixSetup.exe', @@ -144,7 +156,9 @@ def build(ctx,
vstudio_root=None, arch="x64", major_version='7', python_runtimes @task -def test(ctx, vstudio_root=None, arch="x64", major_version='7', python_runtimes='3', debug=False): +def test( + ctx, vstudio_root=None, arch="x64", major_version='7', python_runtimes='3', release_version='nightly', debug=False +): """ Run the unit test for the MSI installer for the agent """ @@ -154,12 +168,13 @@ def test(ctx, vstudio_root=None, arch="x64", major_version='7', python_runtimes= arch=arch, major_version=major_version, python_runtimes=python_runtimes, + release_version=release_version, debug=debug, ) configuration = "Release" if debug: configuration = "Debug" - env = _get_env(ctx, major_version, python_runtimes) + env = _get_env(ctx, major_version, python_runtimes, release_version) # Generate the config file if not ctx.run( diff --git a/tasks/pipeline.py b/tasks/pipeline.py index b48cfef53ec92..cfc15bf87196a 100644 --- a/tasks/pipeline.py +++ b/tasks/pipeline.py @@ -427,7 +427,7 @@ def check_notify_teams(_): print( "Error: Some teams in CODEOWNERS don't have their slack notification channel specified in the GITHUB_SLACK_MAP !!" 
) - Exit(code=1) + raise Exit(code=1) else: print("All CODEOWNERS teams have their slack notification channel specified !") diff --git a/tasks/release.py b/tasks/release.py index 346619512cf83..ec159012cee86 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -17,12 +17,11 @@ from .libs.common.color import color_message from .libs.common.github_api import GithubAPI, get_github_token from .libs.common.gitlab import Gitlab, get_gitlab_token -from .libs.common.remote_api import APIError from .libs.common.user_interactions import yes_no_question from .libs.version import Version from .modules import DEFAULT_MODULES from .pipeline import run -from .utils import DEFAULT_BRANCH, get_version, nightly_entry_for, release_entry_for +from .utils import DEFAULT_BRANCH, check_clean_branch_state, get_version, nightly_entry_for, release_entry_for # Generic version regex. Aims to match: # - X.Y.Z @@ -936,41 +935,6 @@ def check_base_branch(branch, release_version): return branch == DEFAULT_BRANCH or branch == release_version.branch() -def check_uncommitted_changes(ctx): - """ - Checks if there are uncommitted changes in the local git repository. - """ - modified_files = ctx.run("git --no-pager diff --name-only HEAD | wc -l", hide=True).stdout.strip() - - # Return True if at least one file has uncommitted changes. 
- return modified_files != "0" - - -def check_local_branch(ctx, branch): - """ - Checks if the given branch exists locally - """ - matching_branch = ctx.run(f"git --no-pager branch --list {branch} | wc -l", hide=True).stdout.strip() - - # Return True if a branch is returned by git branch --list - return matching_branch != "0" - - -def check_upstream_branch(github, branch): - """ - Checks if the given branch already exists in the upstream repository - """ - try: - github_branch = github.get_branch(branch) - except APIError as e: - if e.status_code == 404: - return False - raise e - - # Return True if the branch exists - return github_branch and github_branch.get('name', False) - - def parse_major_versions(major_versions): return sorted(int(x) for x in major_versions.split(",")) @@ -1092,19 +1056,11 @@ def create_rc(ctx, major_versions="6,7", patch_version=False, upstream="origin") print(color_message("Checking repository state", "bold")) ctx.run("git fetch") - if check_uncommitted_changes(ctx): - raise Exit( - color_message( - "There are uncomitted changes in your repository. Please commit or stash them before trying again.", - "red", - ), - code=1, - ) - # Check that the current and update branches are valid current_branch = ctx.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip() update_branch = f"release/{new_highest_version}" + check_clean_branch_state(ctx, github, update_branch) if not check_base_branch(current_branch, new_highest_version): raise Exit( color_message( @@ -1114,24 +1070,6 @@ def create_rc(ctx, major_versions="6,7", patch_version=False, upstream="origin") code=1, ) - if check_local_branch(ctx, update_branch): - raise Exit( - color_message( - f"The branch {update_branch} already exists locally. Please remove it before trying again.", - "red", - ), - code=1, - ) - - if check_upstream_branch(github, update_branch): - raise Exit( - color_message( - f"The branch {update_branch} already exists upstream. 
Please remove it before trying again.", - "red", - ), - code=1, - ) - # Find milestone based on what the next final version is. If the milestone does not exist, fail. milestone_name = str(new_final_version) @@ -1403,14 +1341,8 @@ def unfreeze(ctx, base_directory="~/dd", major_versions="6,7", upstream="origin" print(color_message("Checking repository state", "bold")) ctx.run("git fetch") - if check_uncommitted_changes(ctx): - raise Exit( - color_message( - "There are uncomitted changes in your repository. Please commit or stash them before trying again.", - "red", - ), - code=1, - ) + github = GithubAPI(repository=REPOSITORY_NAME, api_token=get_github_token()) + check_clean_branch_state(ctx, github, release_branch) if not yes_no_question( f"This task will create new branches with the name '{release_branch}' in repositories: {', '.join(UNFREEZE_REPOS)}. Is this OK?", diff --git a/tasks/system_probe.py b/tasks/system_probe.py index e2aac44ab1625..ef1cb7e8632c7 100644 --- a/tasks/system_probe.py +++ b/tasks/system_probe.py @@ -1477,7 +1477,6 @@ def print_failed_tests(_, output_dir): tgz.extractall(path=unpack_dir) for test_json in glob.glob(f"{unpack_dir}/*.json"): - bundle, _ = os.path.splitext(os.path.basename(test_json)) with open(test_json) as tf: for line in tf: json_test = json.loads(line.strip()) @@ -1487,7 +1486,7 @@ def print_failed_tests(_, output_dir): action = json_test["Action"] if action == "fail": - print(f"FAIL: [{test_platform}] [{bundle}] {package} {name}") + print(f"FAIL: [{test_platform}] {package} {name}") fail_count += 1 if fail_count > 0: diff --git a/tasks/test.py b/tasks/test.py index e74f37a6e6670..674579f2c9aef 100644 --- a/tasks/test.py +++ b/tasks/test.py @@ -93,6 +93,7 @@ def environ(env): ] TOOL_LIST_PROTO = [ + 'github.com/favadi/protoc-go-inject-tag', 'github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway', 'github.com/golang/protobuf/protoc-gen-go', 'github.com/golang/mock/mockgen', @@ -1029,6 +1030,6 @@ def junit_upload(_, 
tgz_path): def junit_macos_repack(_, infile, outfile): """ Repacks JUnit tgz file from macOS Github Action run, so it would - containt correct job name and job URL. + contain correct job name and job URL. """ repack_macos_junit_tar(infile, outfile) diff --git a/tasks/unit-tests/licenses_test.py b/tasks/unit-tests/licenses_test.py new file mode 100644 index 0000000000000..0d996d811f5a4 --- /dev/null +++ b/tasks/unit-tests/licenses_test.py @@ -0,0 +1,15 @@ +import unittest + +from ..licenses import is_valid_quote + + +class TestLicensesMethod(unittest.TestCase): + def test_valid_quotes(self): + self.assertTrue(is_valid_quote('"\'hello\'"')) + + def test_invalid_quotes(self): + self.assertFalse(is_valid_quote('""hello' '"""')) + + +if __name__ == '__main__': + unittest.main() diff --git a/tasks/utils.py b/tasks/utils.py index 264f512340400..5d6615f58ecb4 100644 --- a/tasks/utils.py +++ b/tasks/utils.py @@ -12,6 +12,9 @@ from invoke import task from invoke.exceptions import Exit +from .libs.common.color import color_message +from .libs.common.remote_api import APIError + # constants ORG_PATH = "github.com/DataDog" DEFAULT_BRANCH = "main" @@ -417,8 +420,27 @@ def get_version( def get_version_numeric_only(ctx, major_version='7'): # we only need the git info for the non omnibus builds, omnibus includes all this information by default + version = "" + pipeline_id = os.getenv("CI_PIPELINE_ID") + project_name = os.getenv("CI_PROJECT_NAME") + if pipeline_id and pipeline_id.isdigit() and project_name == REPO_NAME: + try: + if not os.path.exists(AGENT_VERSION_CACHE_NAME): + ctx.run( + f"aws s3 cp s3://dd-ci-artefacts-build-stable/datadog-agent/{pipeline_id}/{AGENT_VERSION_CACHE_NAME} .", + hide="stdout", + ) - version, *_ = query_version(ctx, major_version_hint=major_version) + with open(AGENT_VERSION_CACHE_NAME, "r") as file: + cache_data = json.load(file) + + version, *_ = cache_data[major_version] + except (IOError, json.JSONDecodeError, IndexError) as e: + # If a cache file 
is found but corrupted we ignore it. + print(f"Error while recovering the version from {AGENT_VERSION_CACHE_NAME}: {e}") + version = "" + if not version: + version, *_ = query_version(ctx, major_version_hint=major_version) return version @@ -460,3 +482,70 @@ def nightly_entry_for(agent_major_version): def release_entry_for(agent_major_version): return f"release-a{agent_major_version}" + + +def check_clean_branch_state(ctx, github, branch): + """ + Check we are in a clean situation to create a new branch: + No uncommitted change, and branch doesn't exist locally or upstream + """ + if check_uncommitted_changes(ctx): + raise Exit( + color_message( + "There are uncommitted changes in your repository. Please commit or stash them before trying again.", + "red", + ), + code=1, + ) + if check_local_branch(ctx, branch): + raise Exit( + color_message( + f"The branch {branch} already exists locally. Please remove it before trying again.", + "red", + ), + code=1, + ) + + if check_upstream_branch(github, branch): + raise Exit( + color_message( + f"The branch {branch} already exists upstream. Please remove it before trying again.", + "red", + ), + code=1, + ) + + +def check_uncommitted_changes(ctx): + """ + Checks if there are uncommitted changes in the local git repository. + """ + modified_files = ctx.run("git --no-pager diff --name-only HEAD | wc -l", hide=True).stdout.strip() + + # Return True if at least one file has uncommitted changes.
+ - return modified_files != "0" + + +def check_local_branch(ctx, branch): + """ + Checks if the given branch exists locally + """ + matching_branch = ctx.run(f"git --no-pager branch --list {branch} | wc -l", hide=True).stdout.strip() + + # Return True if a branch is returned by git branch --list + return matching_branch != "0" + + +def check_upstream_branch(github, branch): + """ + Checks if the given branch already exists in the upstream repository + """ + try: + github_branch = github.get_branch(branch) + except APIError as e: + if e.status_code == 404: + return False + raise e + + # Return True if the branch exists + return github_branch and github_branch.get('name', False) diff --git a/tasks/winbuildscripts/Generate-Chocolatey-Package.ps1 b/tasks/winbuildscripts/Generate-Chocolatey-Package.ps1 index d8129b5dee969..7ebfffc60fb32 100644 --- a/tasks/winbuildscripts/Generate-Chocolatey-Package.ps1 +++ b/tasks/winbuildscripts/Generate-Chocolatey-Package.ps1 @@ -61,8 +61,18 @@ Invoke-WebRequest -Uri "https://raw.githubusercontent.com/DataDog/datadog-agent/ Write-Host "Generating Chocolatey $installMethod package version $agentVersion in $outputDirectory" -if ([System.Net.WebRequest]::Create($url).GetResponse().StatusCode -ne 200) { - Write-Error "Package $($url) doesn't exists, cannot continue publishing process." +$statusCode = -1 +try { + $statusCode = (iwr $url).StatusCode +} +catch [System.Net.WebException] { + if ($_.Exception.Status -eq "ProtocolError") { + $statusCode = [int]$_.Exception.Response.StatusCode + } +} + +if ($statusCode -ne 200) { + Write-Warning "Package $($url) doesn't exist yet, make sure it exists before publishing the Chocolatey package !"
} if (!(Test-Path $outputDirectory)) { diff --git a/tasks/winbuildscripts/Update-Winget.ps1 b/tasks/winbuildscripts/Update-Winget.ps1 index 7f6772c2a4dd4..045dacde15c3b 100644 --- a/tasks/winbuildscripts/Update-Winget.ps1 +++ b/tasks/winbuildscripts/Update-Winget.ps1 @@ -4,6 +4,18 @@ Set-Location c:\mnt # Install dev tools, including invoke pip3 install -r requirements.txt +# Update the repo +$ghCliInstallResult = Start-Process "msiexec" -ArgumentList "/qn /i https://github.com/cli/cli/releases/download/v2.29.0/gh_2.29.0_windows_amd64.msi /log install.log" -NoNewWindow -Wait -Passthru +if ($ghCliInstallResult.ExitCode -ne 0) { + Get-Content install.log | Write-Output + Write-Error ("Failed to install Github CLI: {0}" -f $ghCliInstallResult.ExitCode) +} else { + # Github CLI uses the GH_TOKEN + $env:GH_TOKEN = $env:WINGET_GITHUB_ACCESS_TOKEN + & 'C:\Program Files\GitHub CLI\gh.exe' repo sync https://github.com/robot-github-winget-datadog-agent/winget-pkgs.git --source microsoft/winget-pkgs +} +return + $rawAgentVersion = (inv agent.version) Write-Host "Detected agent version ${rawAgentVersion}" $m = [regex]::match($rawAgentVersion, "(\d+\.\d+\.\d+)(-rc.(\d+))?") diff --git a/tasks/winbuildscripts/dobuild.bat b/tasks/winbuildscripts/dobuild.bat index c898b9902e1d3..74a516b73f9c0 100644 --- a/tasks/winbuildscripts/dobuild.bat +++ b/tasks/winbuildscripts/dobuild.bat @@ -56,6 +56,6 @@ inv -e %OMNIBUS_BUILD% %OMNIBUS_ARGS% --skip-deps --major-version %MAJOR_VERSION REM only build MSI for main targets for now. 
if "%OMNIBUS_TARGET%" == "main" ( - @echo "inv -e msi.build --major-version %MAJOR_VERSION% --python-runtimes "%PY_RUNTIMES%" - inv -e msi.build --major-version %MAJOR_VERSION% --python-runtimes "%PY_RUNTIMES%" || exit /b 106 + @echo "inv -e msi.build --major-version %MAJOR_VERSION% --python-runtimes "%PY_RUNTIMES%" --release-version %RELEASE_VERSION% + inv -e msi.build --major-version %MAJOR_VERSION% --python-runtimes "%PY_RUNTIMES%" --release-version %RELEASE_VERSION% || exit /b 106 ) diff --git a/tasks/winbuildscripts/unittests.ps1 b/tasks/winbuildscripts/unittests.ps1 index 67890373e9aef..0e15ce2985c6f 100644 --- a/tasks/winbuildscripts/unittests.ps1 +++ b/tasks/winbuildscripts/unittests.ps1 @@ -89,7 +89,7 @@ if($err -ne 0){ } & inv -e install-tools -& inv -e test --skip-linters --junit-tar="$Env:JUNIT_TAR" --race --profile --rerun-fails=2 --cpus 4 --arch $archflag --python-runtimes="$Env:PY_RUNTIMES" --python-home-2=$Env:Python2_ROOT_DIR --python-home-3=$Env:Python3_ROOT_DIR --save-result-json C:\mnt\test_output.json +& inv -e test --skip-linters --junit-tar="$Env:JUNIT_TAR" --race --profile --rerun-fails=2 --cpus 8 --arch $archflag --python-runtimes="$Env:PY_RUNTIMES" --python-home-2=$Env:Python2_ROOT_DIR --python-home-3=$Env:Python3_ROOT_DIR --save-result-json C:\mnt\test_output.json $err = $LASTEXITCODE Write-Host Test result is $err diff --git a/test/e2e/cws-tests/tests/test_e2e_cspm_kubernetes.py b/test/e2e/cws-tests/tests/test_e2e_cspm_kubernetes.py index 77aeaa892a7ab..2fea068b6825f 100644 --- a/test/e2e/cws-tests/tests/test_e2e_cspm_kubernetes.py +++ b/test/e2e/cws-tests/tests/test_e2e_cspm_kubernetes.py @@ -113,36 +113,6 @@ class TestE2EKubernetes(unittest.TestCase): ], } expectedFindingsWorkerNode = { - "cis-kubernetes-1.5.1-4.1.1": [ - { - "result": "error", - } - ], - "cis-kubernetes-1.5.1-4.1.2": [ - { - "result": "error", - } - ], - "cis-kubernetes-1.5.1-4.1.3": [ - { - "result": "error", - } - ], - "cis-kubernetes-1.5.1-4.1.4": [ - { - "result": 
"error", - } - ], - "cis-kubernetes-1.5.1-4.1.7": [ - { - "result": "error", - } - ], - "cis-kubernetes-1.5.1-4.1.8": [ - { - "result": "error", - } - ], "cis-kubernetes-1.5.1-4.2.1": [ { "result": "failed", diff --git a/test/fakeintake/aggregator/common.go b/test/fakeintake/aggregator/common.go index 314d4746d4e81..ee241d1481200 100644 --- a/test/fakeintake/aggregator/common.go +++ b/test/fakeintake/aggregator/common.go @@ -10,6 +10,7 @@ import ( "compress/gzip" "compress/zlib" "io" + "sort" "github.com/DataDog/datadog-agent/test/fakeintake/api" ) @@ -78,6 +79,15 @@ func (agg *Aggregator[P]) ContainsPayloadNameAndTags(name string, tags []string) return false } +func (agg *Aggregator[P]) GetNames() []string { + names := []string{} + for name := range agg.payloadsByName { + names = append(names, name) + } + sort.Strings(names) + return names +} + func enflate(payload []byte, encoding string) (enflated []byte, err error) { rc, err := getReadCloserForEncoding(payload, encoding) if err != nil { diff --git a/test/fakeintake/client/client.go b/test/fakeintake/client/client.go index a7f2efb5d1fa2..de0da3971d3bd 100644 --- a/test/fakeintake/client/client.go +++ b/test/fakeintake/client/client.go @@ -37,7 +37,7 @@ func NewClient(fakeIntakeURL string) *Client { } func (c *Client) getMetrics() error { - payloads, err := c.getFakePayloads("api/v2/metrics") + payloads, err := c.getFakePayloads("/api/v2/series") if err != nil { return err } @@ -45,7 +45,7 @@ func (c *Client) getMetrics() error { } func (c *Client) getCheckRuns() error { - payloads, err := c.getFakePayloads("api/v1/check_run") + payloads, err := c.getFakePayloads("/api/v1/check_run") if err != nil { return err } @@ -53,7 +53,7 @@ func (c *Client) getCheckRuns() error { } func (c *Client) getLogs() error { - payloads, err := c.getFakePayloads("api/v2/logs") + payloads, err := c.getFakePayloads("/api/v2/logs") if err != nil { return err } diff --git a/test/kitchen/drivers/azure-driver.yml 
b/test/kitchen/drivers/azure-driver.yml index 260ad092443a4..a88d84ec53829 100644 --- a/test/kitchen/drivers/azure-driver.yml +++ b/test/kitchen/drivers/azure-driver.yml @@ -98,7 +98,6 @@ platforms: windows = platform_name.include?("win") sles15 = platform_name.include?("sles-15") - windows2008 = windows && platform_name.include?("2008") debian9 = platform_name.include?("debian-9") if windows @@ -146,15 +145,6 @@ platforms: location: <%= location %> <% if windows %> vm_name: ddat<%= platform[0] %> - <% if windows2008 %> - winrm_powershell_script: |- - winrm quickconfig -q - winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="2048"}' - winrm set winrm/config '@{MaxTimeoutms="1800000"}' - winrm set winrm/config/service '@{AllowUnencrypted="true"}' - winrm set winrm/config/service/auth '@{Basic="true"}' - netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" profile=public protocol=tcp localport=5985 remoteip=localsubnet new remoteip=any - <% end %> <% else %> vm_name: dd-agent-testing-<%= platform[0] %>-azure <% end %> diff --git a/test/kitchen/drivers/ec2-driver.yml b/test/kitchen/drivers/ec2-driver.yml index 377d677565943..7d33008299bf3 100644 --- a/test/kitchen/drivers/ec2-driver.yml +++ b/test/kitchen/drivers/ec2-driver.yml @@ -76,7 +76,8 @@ platforms: windows = platform_name.include?("win") sles15 = platform_name.include?("sles-15") al2022 = platform_name.include?("amazonlinux2022") - windows2008 = windows && platform_name.include?("2008") + al2023 = platform_name.include?("amazonlinux2023") + fedora = platform_name.include?("fedora") if windows windows_platforms << platform_name @@ -126,7 +127,7 @@ platforms: volume_type: gp2 volume_size: 100 delete_on_termination: true - <% if allow_rsa_key || al2022 %> + <% if allow_rsa_key || al2022 || al2023 || fedora %> user_data: | #!/bin/sh <% end %> @@ -134,10 +135,10 @@ platforms: echo PubkeyAcceptedKeyTypes=+ssh-rsa >> /etc/ssh/sshd_config service ssh reload <% end %> - <% if al2022 %> + <% 
if al2022 || al2023 || fedora %> sudo dnf install -y libxcrypt-compat <% end %> - <% if (ENV['KITCHEN_CI_MOUNT_PATH'] && ENV['KITCHEN_CI_ROOT_PATH']) || al2022 %> + <% if (ENV['KITCHEN_CI_MOUNT_PATH'] && ENV['KITCHEN_CI_ROOT_PATH']) || al2022 || al2023 || fedora %> lifecycle: post_create: <% end %> @@ -147,7 +148,7 @@ platforms: sudo chmod a+rwx <%= ENV['KITCHEN_CI_MOUNT_PATH'] %>; sudo ln -s <%= ENV['KITCHEN_CI_MOUNT_PATH'] %> <%= ENV['KITCHEN_CI_ROOT_PATH'] %>; <% end %> - <% if al2022 %> + <% if al2022 || al2023 || fedora %> # Add a hook after creating the host, to make sure we wait until the user_data # script has been run. # Snippet taken from the kitchen docs: https://kitchen.ci/docs/reference/lifecycle-hooks/ @@ -177,12 +178,12 @@ platforms: name: winrm elevated: true username: administrator - + <% else %> connection_retries: 30 connection_retry_sleep: 2 <% end %> - <% if sles15 || al2022 %> + <% if sles15 || al2022 || al2023 %> # The AWS EC2 driver doesn't recognize Amazon Linux 2022 yet, # therefore it doesn't know that it needs to use ec2-user. 
username: ec2-user diff --git a/test/kitchen/drivers/hyperv-driver.yml b/test/kitchen/drivers/hyperv-driver.yml index f0553d81789ba..3626cf008479f 100644 --- a/test/kitchen/drivers/hyperv-driver.yml +++ b/test/kitchen/drivers/hyperv-driver.yml @@ -51,7 +51,6 @@ platforms: windows = platform_name.include?("win") sles15 = platform_name.include?("sles-15") - windows2008 = windows && platform_name.include?("2008") if windows windows_platforms << platform_name diff --git a/test/kitchen/platforms.json b/test/kitchen/platforms.json index 0ac2caa84b420..bd3024842f9cf 100644 --- a/test/kitchen/platforms.json +++ b/test/kitchen/platforms.json @@ -40,11 +40,25 @@ "ec2": { "x86_64": { "debian-10": "ami-09c5dc3046df4741f", - "debian-11": "ami-09e24b0cfe072ecef" + "debian-11": "ami-09e24b0cfe072ecef", + "debian-12": "ami-0076a5c1d029e906a" }, "arm64": { "debian-10": "ami-0d35a4c5725fd0da7", - "debian-11": "ami-03ea090ddd75eb738" + "debian-11": "ami-03ea090ddd75eb738", + "debian-12": "ami-02aab8d5301cb8d68" + } + } + }, + "fedora": { + "ec2": { + "x86_64": { + "fedora-36": "ami-08b7bda26f4071b80", + "fedora-37": "ami-023fb534213ca41da" + }, + "arm64": { + "fedora-36": "ami-01925eb0821988986", + "fedora-37": "ami-0e9221491fc51fca6" } } }, @@ -81,14 +95,16 @@ "ubuntu-16-04": "ami-0b0ea68c435eb488d", "ubuntu-18-04": "ami-0ee23bfc74a881de5", "ubuntu-20-04": "ami-079ca844e323047c2", - "ubuntu-22-04": "ami-08c40ec9ead489470" + "ubuntu-22-04": "ami-08c40ec9ead489470", + "ubuntu-23-04": "ami-062b1c3c00754c48b" }, "arm64": { "ubuntu-18-04": "ami-02ed82f3a38303e6f", "ubuntu-20-04": "ami-0b75998a97c952252", "ubuntu-20-04-2": "ami-0a82127206c2824a1", "ubuntu-21-04": "ami-044f0ceee8e885e87", - "ubuntu-22-04": "ami-02ddaf75821f25213" + "ubuntu-22-04": "ami-02ddaf75821f25213", + "ubuntu-23-04": "ami-05fab5da2d7fe0b0b" } }, "vagrant": { @@ -112,20 +128,21 @@ "ec2": { "x86_64": { "amazonlinux2-4-14": "ami-038b3df3312ddf25d", - "amazonlinux2-5-10": "ami-033b95fb8079dc481", - 
"amazonlinux2022-5-15": "ami-0309aede310b9cc1f" + "amazonlinux2-5-10": "ami-06a0cd9728546d178", + "amazonlinux2022-5-15": "ami-0309aede310b9cc1f", + "amazonlinux2023": "ami-0889a44b331db0194" }, "arm64": { "amazonlinux2-4-14": "ami-090230ed0c6b13c74", - "amazonlinux2-5-10": "ami-0e449176cecc3e577", - "amazonlinux2022-5-15": "ami-0a8495f6303122235" + "amazonlinux2-5-10": "ami-09e51988f56677f44", + "amazonlinux2022-5-15": "ami-0a8495f6303122235", + "amazonlinux2023": "ami-08fc6fb8ad2e794bb" } } }, "windows": { "azure": { "x86_64": { - "win2008r2": "id,/subscriptions/a53d2c33-d372-4e36-9e33-eaf32ce95b28/resourceGroups/agentcustomimages/providers/Microsoft.Compute/galleries/agentcustomimages/images/Windows2008-R2-SP1/versions/1.0.0", "win2012": "id,/subscriptions/a53d2c33-d372-4e36-9e33-eaf32ce95b28/resourceGroups/agentcustomimages/providers/Microsoft.Compute/galleries/agentcustomimages/images/Windows-2012/versions/1.0.0", "win2012r2": "id,/subscriptions/a53d2c33-d372-4e36-9e33-eaf32ce95b28/resourceGroups/agentcustomimages/providers/Microsoft.Compute/galleries/agentcustomimages/images/Windows-2012-R2/versions/1.0.1", "win2016": "urn,MicrosoftWindowsServer:WindowsServer:2016-Datacenter-Server-Core:14393.5429.221014", diff --git a/test/kitchen/site-cookbooks/dd-system-probe-check/recipes/linux.rb b/test/kitchen/site-cookbooks/dd-system-probe-check/recipes/linux.rb index d4b1de94b1505..ad9f86b6dc665 100644 --- a/test/kitchen/site-cookbooks/dd-system-probe-check/recipes/linux.rb +++ b/test/kitchen/site-cookbooks/dd-system-probe-check/recipes/linux.rb @@ -67,6 +67,8 @@ package_name 'nmap-ncat' when 'redhat', 'centos', 'fedora' package_name 'nc' + when 'debian', 'ubuntu' + package_name 'netcat-openbsd' else package_name 'netcat' end @@ -80,7 +82,7 @@ case node[:platform] when 'amazon' case node[:platform_version] - when '2022' + when '2022', '2023' package_name 'curl-minimal' else package_name 'curl' diff --git 
a/test/kitchen/test/integration/system-probe-test/rspec_datadog/system-probe-test_spec.rb b/test/kitchen/test/integration/system-probe-test/rspec_datadog/system-probe-test_spec.rb index 24c61f27ed858..89d0956390608 100644 --- a/test/kitchen/test/integration/system-probe-test/rspec_datadog/system-probe-test_spec.rb +++ b/test/kitchen/test/integration/system-probe-test/rspec_datadog/system-probe-test_spec.rb @@ -9,25 +9,10 @@ GOLANG_TEST_FAILURE = /FAIL:/ -skip_prebuilt_tests = Array.[]( - "pkg/collector/corechecks/ebpf/probe" -) - -runtime_compiled_tests = Array.[]( - "pkg/network/tracer", - "pkg/network/protocols/http", - "pkg/collector/corechecks/ebpf/probe" -) - -co_re_tests = Array.[]( - "pkg/network/tracer", - "pkg/network/protocols/http", - "pkg/collector/corechecks/ebpf/probe" -) - TIMEOUTS = { "pkg/network/protocols/http$" => "15m", - "pkg/network/tracer$" => "25m", + "pkg/network/tracer$" => "55m", + "pkg/network/usm$" => "30m", } DEFAULT_TIMEOUT = "10m" @@ -72,16 +57,16 @@ def get_timeout(package) FileUtils.chmod 0644, f, :verbose => true end -shared_examples "passes" do |bundle, env, filter, filter_inclusive| - after :context do - print KernelOut.format(`find "/tmp/pkgjson/#{bundle}" -maxdepth 1 -type f -path "*.json" -exec cat >"/tmp/testjson/#{bundle}.json" {} +`) +describe "system-probe" do + after :all do + print KernelOut.format(`find "/tmp/pkgjson" -maxdepth 1 -type f -path "*.json" -exec cat >"/tmp/testjson/out.json" {} +`) + print KernelOut.format(`tar -C /tmp/junit -czf /tmp/junit.tar.gz .`) + print KernelOut.format(`tar -C /tmp/testjson -czf /tmp/testjson.tar.gz .`) end Dir.glob("#{tests_dir}/**/testsuite").sort.each do |f| pkg = f.delete_prefix("#{tests_dir}/").delete_suffix('/testsuite') - next unless (filter_inclusive and filter.include? pkg) or (!filter_inclusive and !filter.include? 
pkg) - - base_env = { + final_env = { "DD_SYSTEM_PROBE_BPF_DIR"=>"#{tests_dir}/pkg/ebpf/bytecode/build", "DD_SYSTEM_PROBE_JAVA_DIR"=>"#{tests_dir}/pkg/network/java", "GOVERSION"=>"unknown" @@ -90,17 +75,16 @@ def get_timeout(package) it "#{pkg} tests" do |ex| Dir.chdir(File.dirname(f)) do - xmlpath = "/tmp/junit/#{bundle}/#{junitfile}" + xmlpath = "/tmp/junit/#{junitfile}" cmd = ["sudo", "-E", "/go/bin/gotestsum", "--format", "dots", "--junitfile", xmlpath, - "--jsonfile", "/tmp/pkgjson/#{bundle}/#{pkg.gsub("/","-")}.json", + "--jsonfile", "/tmp/pkgjson/#{pkg.gsub("/","-")}.json", "--raw-command", "--", "/go/bin/test2json", "-t", "-p", pkg, f, "-test.v", "-test.count=1", "-test.timeout=#{get_timeout(pkg)}" ] - final_env = base_env.merge(env) Open3.popen2e(final_env, *cmd) do |_, output, wait_thr| output.each_line do |line| puts KernelOut.format(line.strip) @@ -109,7 +93,6 @@ def get_timeout(package) xmldoc = REXML::Document.new(File.read(xmlpath)) REXML::XPath.each(xmldoc, "//testsuites/testsuite/properties") do |props| - props.add_element("property", { "name" => "dd_tags[test.bundle]", "value" => bundle }) props.add_element("property", { "name" => "dd_tags[os.platform]", "value" => platform }) props.add_element("property", { "name" => "dd_tags[os.name]", "value" => osname }) props.add_element("property", { "name" => "dd_tags[os.architecture]", "value" => arch }) @@ -122,49 +105,3 @@ def get_timeout(package) end end end - -describe "system-probe" do - after :all do - print KernelOut.format(`tar -C /tmp/junit -czf /tmp/junit.tar.gz .`) - print KernelOut.format(`tar -C /tmp/testjson -czf /tmp/testjson.tar.gz .`) - end - - context "prebuilt" do - env = { - "DD_ENABLE_RUNTIME_COMPILER"=>"false", - "DD_ENABLE_CO_RE"=>"false" - } - include_examples "passes", "prebuilt", env, skip_prebuilt_tests, false - end - - context "runtime compiled" do - env = { - "DD_ENABLE_RUNTIME_COMPILER"=>"true", - "DD_ALLOW_PRECOMPILED_FALLBACK"=>"false", - "DD_ENABLE_CO_RE"=>"false" - } - 
include_examples "passes", "runtime", env, runtime_compiled_tests, true - end - - context "CO-RE" do - env = { - "DD_ENABLE_CO_RE"=>"true", - "DD_ENABLE_RUNTIME_COMPILER"=>"false", - "DD_ALLOW_RUNTIME_COMPILED_FALLBACK"=>"false", - "DD_ALLOW_PRECOMPILED_FALLBACK"=>"false" - } - include_examples "passes", "co-re", env, co_re_tests, true - end - - context "fentry" do - env = { - "NETWORK_TRACER_FENTRY_TESTS"=>"true", - "DD_ENABLE_CO_RE"=>"true", - "DD_ENABLE_RUNTIME_COMPILER"=>"false", - "DD_ALLOW_RUNTIME_COMPILED_FALLBACK"=>"false" - } - if osname == "amzn-2" and arch == "x86_64" and release.start_with?("5.10.") - include_examples "passes", "fentry", env, skip_prebuilt_tests, false - end - end -end diff --git a/test/new-e2e/README.md b/test/new-e2e/README.md index 9a9eecfc6ab05..15da63dd69375 100644 --- a/test/new-e2e/README.md +++ b/test/new-e2e/README.md @@ -2,6 +2,10 @@ This folder contains tests and utilities to write and run agent end to end tests based on Pulumi. +## Documentation + +See https://pkg.go.dev/github.com/DataDog/datadog-agent/test/new-e2e@main/utils/e2e. + ## Development in VSCode This is a sub-module within `datadog-agent`. VSCode will complain about the multiple `go.mod` files. 
While waiting for a full repo migration to go workspaces, create a go workspace file and add `test/new-e2e` to workspaces diff --git a/test/new-e2e/containers/ecs_test.go b/test/new-e2e/containers/ecs_test.go index 248bdb1ce3509..0b77b02d5f62e 100644 --- a/test/new-e2e/containers/ecs_test.go +++ b/test/new-e2e/containers/ecs_test.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/runner" "github.com/DataDog/datadog-agent/test/new-e2e/runner/parameters" "github.com/DataDog/datadog-agent/test/new-e2e/utils/infra" - "github.com/DataDog/test-infra-definitions/aws/scenarios/ecs" + "github.com/DataDog/test-infra-definitions/scenarios/aws/ecs" "github.com/cenkalti/backoff" "github.com/pulumi/pulumi/sdk/v3/go/auto" diff --git a/test/new-e2e/doc.go b/test/new-e2e/doc.go new file mode 100644 index 0000000000000..551e9238a6a7a --- /dev/null +++ b/test/new-e2e/doc.go @@ -0,0 +1,6 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package newe2e diff --git a/test/new-e2e/examples/vm_fakeintake_test.go b/test/new-e2e/examples/vm_fakeintake_test.go new file mode 100644 index 0000000000000..a39c3ca78e27b --- /dev/null +++ b/test/new-e2e/examples/vm_fakeintake_test.go @@ -0,0 +1,75 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package examples + +import ( + _ "embed" + "errors" + "time" + + "testing" + + "github.com/DataDog/datadog-agent/test/new-e2e/utils/e2e" + "github.com/cenkalti/backoff/v4" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type vmFakeintakeSuite struct { + e2e.Suite[e2e.AgentEnv] +} + +func TestE2EVMFakeintakeSuite(t *testing.T) { + e2e.Run(t, &vmFakeintakeSuite{}, e2e.AgentStackDef(nil)) +} + +func (s *vmFakeintakeSuite) TestVM() { + output := s.Env().VM.Execute("ls") + require.NotEmpty(s.T(), output) +} + +func (s *vmFakeintakeSuite) TestAgent() { + err := s.Env().Agent.WaitForReady() + require.NoError(s.T(), err) + output := s.Env().Agent.Status() + require.Contains(s.T(), output.Content, "Getting the status from the agent") + isReady, err := s.Env().Agent.IsReady() + require.NoError(s.T(), err) + assert.True(s.T(), isReady, "Agent is not ready") +} + +func (s *vmFakeintakeSuite) TestMetrics() { + t := s.T() + err := backoff.Retry(func() error { + metrics, err := s.Env().Fakeintake.Client.GetMetric("system.uptime") + if err != nil { + return err + } + if len(metrics) == 0 { + return errors.New("No metrics yet") + } + if metrics[len(metrics)-1].Points[len(metrics[len(metrics)-1].Points)-1].Value == 0 { + return errors.New("") + } + return nil + }, backoff.WithMaxRetries(backoff.NewConstantBackOff(500*time.Millisecond), 20)) + require.NoError(t, err) +} + +func (s *vmFakeintakeSuite) TestCheckRuns() { + t := s.T() + err := backoff.Retry(func() error { + checkRuns, err := s.Env().Fakeintake.Client.GetCheckRun("datadog.agent.up") + if err != nil { + return err + } + if len(checkRuns) == 0 { + return errors.New("No check run yet") + } + return nil + }, backoff.WithMaxRetries(backoff.NewConstantBackOff(500*time.Millisecond), 20)) + require.NoError(t, err) +} diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index 4db0dc6edccb7..8e9acf8461307 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -6,64 +6,73 @@ go 1.18 
// The plugin versions NEED to be aligned. // TODO: Implement hard check in CI +replace github.com/DataDog/datadog-agent/test/fakeintake => ../fakeintake + require ( - github.com/DataDog/test-infra-definitions v0.0.0-20230512085134-bf3a1761a5bc + github.com/DataDog/datadog-agent/test/fakeintake v0.46.0-rc.2 + github.com/DataDog/test-infra-definitions v0.0.0-20230526143644-ed785d3a20d5 github.com/aws/aws-sdk-go-v2 v1.18.0 - github.com/aws/aws-sdk-go-v2/config v1.18.24 - github.com/aws/aws-sdk-go-v2/service/ssm v1.33.2 + github.com/aws/aws-sdk-go-v2/config v1.18.25 + github.com/aws/aws-sdk-go-v2/service/ssm v1.36.4 github.com/cenkalti/backoff v2.2.1+incompatible + github.com/cenkalti/backoff/v4 v4.2.1 github.com/pulumi/pulumi-command/sdk v0.7.2 - github.com/pulumi/pulumi/sdk/v3 v3.66.0 - github.com/stretchr/testify v1.8.2 + github.com/pulumi/pulumi/sdk/v3 v3.68.0 + github.com/stretchr/testify v1.8.3 golang.org/x/crypto v0.9.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/zorkian/go-datadog-api.v2 v2.30.0 ) require ( + github.com/DataDog/agent-payload/v5 v5.0.73 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230426101702-58e86b294756 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903 // indirect github.com/acomagu/bufpipe v1.0.4 // indirect github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da // indirect + github.com/agext/levenshtein v1.2.3 // indirect github.com/alessio/shellescape v1.4.1 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.13.23 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.24 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27 // indirect github.com/aws/aws-sdk-go-v2/internal/ini 
v1.3.34 // indirect - github.com/aws/aws-sdk-go-v2/service/ecs v1.27.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ecs v1.27.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.12.10 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.19.0 // indirect github.com/aws/smithy-go v1.13.5 // indirect github.com/blang/semver v3.5.1+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cheggaaa/pb v1.0.29 // indirect github.com/cloudflare/circl v1.3.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/djherbis/times v1.5.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-git/gcfg v1.5.0 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.4.1 // indirect - github.com/go-git/go-git/v5 v5.6.1 // indirect + github.com/go-git/go-git/v5 v5.7.0 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.1.1 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/hcl/v2 v2.16.2 // indirect github.com/imdario/mergo v0.3.15 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/mattn/go-isatty v0.0.18 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect 
github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mitchellh/go-ps v1.0.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/opentracing/basictracer-go v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect @@ -71,19 +80,19 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pkg/term v1.1.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/pulumi/pulumi-aws/sdk/v5 v5.40.0 // indirect + github.com/pulumi/pulumi-aws/sdk/v5 v5.41.0 // indirect github.com/pulumi/pulumi-awsx/sdk v1.0.2 // indirect github.com/pulumi/pulumi-docker/sdk/v3 v3.6.1 // indirect - github.com/pulumi/pulumi-kubernetes/sdk/v3 v3.26.0 // indirect + github.com/pulumi/pulumi-kubernetes/sdk/v3 v3.28.1 // indirect github.com/pulumi/pulumi-libvirt/sdk v0.4.0 // indirect - github.com/pulumi/pulumi-random/sdk/v4 v4.13.0 // indirect + github.com/pulumi/pulumi-random/sdk/v4 v4.13.2 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect github.com/santhosh-tekuri/jsonschema/v5 v5.3.0 // indirect github.com/sergi/go-diff v1.3.1 // indirect - github.com/skeema/knownhosts v1.1.0 // indirect - github.com/spf13/cast v1.5.0 // indirect + github.com/skeema/knownhosts v1.1.1 // indirect + github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/texttheater/golang-levenshtein v1.0.1 // indirect @@ -91,6 +100,7 @@ require ( github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect + github.com/zclconf/go-cty v1.13.2 // indirect github.com/zorkian/go-datadog-api v2.30.0+incompatible // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/mod v0.10.0 // indirect 
@@ -99,7 +109,7 @@ require ( golang.org/x/term v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/tools v0.9.1 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230526015343-6ee61e4f9d5f // indirect google.golang.org/grpc v1.55.0 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index e560959cef595..b95fdaebfca9a 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -1,47 +1,47 @@ -github.com/DataDog/test-infra-definitions v0.0.0-20230512085134-bf3a1761a5bc h1:hbR/sF2RV9AShTzwLZqBuyhgH34gm5Vgh12RwUhsYzg= -github.com/DataDog/test-infra-definitions v0.0.0-20230512085134-bf3a1761a5bc/go.mod h1:WmCQAFypCQB6zjmXl9CGW0qMa5s7krntwld/tt7rjcc= +github.com/DataDog/agent-payload/v5 v5.0.73 h1:fnCnAR+nWY+q//fBZSab4cDFFng0scPEfmLdl9ngmQY= +github.com/DataDog/agent-payload/v5 v5.0.73/go.mod h1:oQZi1VZp1e3QvlSUX4iphZCpJaFepUxWq0hNXxihKBM= +github.com/DataDog/test-infra-definitions v0.0.0-20230526143644-ed785d3a20d5 h1:bevg12AMVEL49W5+ZEJ6bw6PK4JWt+oEPfH3MPsOS8Q= +github.com/DataDog/test-infra-definitions v0.0.0-20230526143644-ed785d3a20d5/go.mod h1:WmCQAFypCQB6zjmXl9CGW0qMa5s7krntwld/tt7rjcc= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod 
h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= -github.com/ProtonMail/go-crypto v0.0.0-20230426101702-58e86b294756 h1:L6S7kR7SlhQKplIBpkra3s6yhcZV51lhRnXmYc4HohI= -github.com/ProtonMail/go-crypto v0.0.0-20230426101702-58e86b294756/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE= +github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903 h1:ZK3C5DtzV2nVAQTx5S5jQvMeDqWtD1By5mOoyY/xJek= +github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE= github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da h1:KjTM2ks9d14ZYCvmHS9iAKVt9AyzRSqNU1qabPih5BY= github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da/go.mod h1:eHEWzANqSiWQsof+nXEI9bUVUyV6F53Fp89EuCh2EAA= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= -github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= +github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod 
h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.18.0 h1:882kkTpSFhdgYRKVZ/VCgf7sd0ru57p2JCxz4/oN5RY= github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2/config v1.18.24 h1:G0mJzpMjJFtK+7KtAky2kAjio21BdzNXblQSm2ZKsy0= -github.com/aws/aws-sdk-go-v2/config v1.18.24/go.mod h1:+9/RIaxGG2let2y9lIYEwOTBhaXqArOakom2TVytvFE= -github.com/aws/aws-sdk-go-v2/credentials v1.13.23 h1:uKTIH4RmFIo04Pijn132WEMaboVLAg96H4l2KFRGzZU= -github.com/aws/aws-sdk-go-v2/credentials v1.13.23/go.mod h1:jYPYi99wUOPIFi0rhiOvXeSEReVOzBqFNOX5bXYoG2o= +github.com/aws/aws-sdk-go-v2/config v1.18.25 h1:JuYyZcnMPBiFqn87L2cRppo+rNwgah6YwD3VuyvaW6Q= +github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4= +github.com/aws/aws-sdk-go-v2/credentials v1.13.24 h1:PjiYyls3QdCrzqUN35jMWtUK1vqVZ+zLfdOa/UPFDp0= +github.com/aws/aws-sdk-go-v2/credentials v1.13.24/go.mod h1:jYPYi99wUOPIFi0rhiOvXeSEReVOzBqFNOX5bXYoG2o= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3 h1:jJPgroehGvjrde3XufFIJUZVK5A2L9a3KwSFgKy9n8w= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26/go.mod h1:2E0LdbJW6lbeU4uxjum99GZzI0ZjDpAb0CoSCM0oeEY= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33 h1:kG5eQilShqmJbv11XL1VpyDbaEJzWxd4zRiCG30GSn4= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20/go.mod h1:/+6lSiby8TBFpTVXZgKiN/rCfkYXEGvhlM4zCgPpt7w= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27 h1:vFQlirhuM8lLlpI7imKOMsjdQLuN9CPi+k44F/OFVsk= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34 
h1:gGLG7yKaXG02/jBlg210R7VgQIotiQntNhsCFejawx8= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc= -github.com/aws/aws-sdk-go-v2/service/ecs v1.27.0 h1:vPpYBJOv1e7WxJPt1IRezDX6BBj/yncV2N0LDnDNOMo= -github.com/aws/aws-sdk-go-v2/service/ecs v1.27.0/go.mod h1:SB6YszwN1iKvyt/Qk+ICeKsfBxjd0CTEwwkmej9qoa0= +github.com/aws/aws-sdk-go-v2/service/ecs v1.27.1 h1:54QSuWR3Pot7HqBRXd+c1yF97h2bqzDBID8qFSAkTlE= +github.com/aws/aws-sdk-go-v2/service/ecs v1.27.1/go.mod h1:SB6YszwN1iKvyt/Qk+ICeKsfBxjd0CTEwwkmej9qoa0= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27 h1:0iKliEXAcCa2qVtRs7Ot5hItA2MsufrphbRFlz1Owxo= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw= -github.com/aws/aws-sdk-go-v2/service/ssm v1.33.2 h1:NXq6I98AZ3rrnykgTp93ik4RykmYEInnGDc4I/mYQNk= -github.com/aws/aws-sdk-go-v2/service/ssm v1.33.2/go.mod h1:bUqD3OXwwp4e+IPXVPfp6g/7OyiSesUjqHwOcwtfZBM= +github.com/aws/aws-sdk-go-v2/service/ssm v1.36.4 h1:3AjvCuRS8OnNVRC/UBagp1Jo2feR94+VAIKO4lz8gOQ= +github.com/aws/aws-sdk-go-v2/service/ssm v1.36.4/go.mod h1:p6MaesK9061w6NTiFmZpUzEkKUY5blKlwD2zYyErxKA= github.com/aws/aws-sdk-go-v2/service/sso v1.12.10 h1:UBQjaMTCKwyUYwiVnUt6toEJwGXsLBI6al083tpjJzY= github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10 h1:PkHIIJs8qvq0e5QybnZoG1K/9QTrLr9OsqCIo59jOBA= @@ -50,6 +50,7 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.19.0 h1:2DQLAKDteoEDI8zpCzqBMaZlJuoE github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8= github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/blang/semver 
v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= @@ -69,25 +70,23 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/djherbis/times v1.5.0 h1:79myA211VwPhFTqUk8xehWrsEO+zcIZj0zT8mXPVARU= github.com/djherbis/times v1.5.0/go.mod h1:5q7FDLvbNg1L/KaBmPcWlVR9NmoKo3+ucqUA3ijQhA0= +github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819 h1:RIB4cRk+lBqKK3Oy0r2gRX4ui7tuhiZq2SuTtTCi0/0= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= -github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= -github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 
h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4= github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg= -github.com/go-git/go-git-fixtures/v4 v4.3.1 h1:y5z6dd3qi8Hl+stezc8p3JxDkoTRqMAlKnXHuzrfjTQ= -github.com/go-git/go-git-fixtures/v4 v4.3.1/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo= -github.com/go-git/go-git/v5 v5.6.1 h1:q4ZRqQl4pR/ZJHc1L5CFjGA1a10u76aV1iC+nh+bHsk= -github.com/go-git/go-git/v5 v5.6.1/go.mod h1:mvyoL6Unz0PiTQrGQfSfiLFhBH1c1e84ylC2MDs4ee8= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f h1:Pz0DHeFij3XFhoBRGUDPzSJ+w2UcK5/0JvF8DRI58r8= +github.com/go-git/go-git/v5 v5.7.0 h1:t9AudWVLmqzlo+4bqdf7GY+46SUuRsx59SboFxkq2aE= +github.com/go-git/go-git/v5 v5.7.0/go.mod h1:coJHKEOk5kUClpsNlXrUvPrDxY3w3gjHvhcZd8Fodw8= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -95,13 +94,14 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v1.1.1 h1:jxpi2eWoU84wbX9iIEyAeeoac3FLuifZpY9tcNUD9kw= github.com/golang/glog v1.1.1/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 
h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -109,14 +109,14 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/hashicorp/hcl/v2 v2.16.2 h1:mpkHZh/Tv+xet3sy3F9Ld4FyI2tUpWe9x3XtPx9f1a0= +github.com/hashicorp/hcl/v2 v2.16.2/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context 
v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -139,14 +139,15 @@ github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= -github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/mmcloughlin/avo v0.5.0/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod 
h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -163,22 +164,22 @@ github.com/pkg/term v1.1.0 h1:xIAAdCMh3QIAy+5FrE8Ad8XoDhEU4ufwbaSozViP9kk= github.com/pkg/term v1.1.0/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pulumi/pulumi-aws/sdk/v5 v5.40.0 h1:HnbtERBDA9Ui5MuKl+7m+eccR8QL3wjz93DRIfU/e80= -github.com/pulumi/pulumi-aws/sdk/v5 v5.40.0/go.mod h1:qFeKTFSNIlMHotu9ntOWFjJBHtCiUhJeaiUB/0nVwXk= +github.com/pulumi/pulumi-aws/sdk/v5 v5.41.0 h1:SJwp+c3GsfnUk3lP0yJJUsQ7FE5SnHINZpUqQkgYiPQ= +github.com/pulumi/pulumi-aws/sdk/v5 v5.41.0/go.mod h1:qFeKTFSNIlMHotu9ntOWFjJBHtCiUhJeaiUB/0nVwXk= github.com/pulumi/pulumi-awsx/sdk v1.0.2 h1:SLm2MUICM91b/02r7KTI2/4R1XRJedIeMQdeYxATe9k= github.com/pulumi/pulumi-awsx/sdk v1.0.2/go.mod h1:jwPmIPvPTVYkq+n6Nz/QfMhNZ1cHvBSORdRYvljV9Xo= github.com/pulumi/pulumi-command/sdk v0.7.2 h1:YmnCX2lc70kpO9DxE4TJyApL1Tq19gxAaVpThQuDthY= github.com/pulumi/pulumi-command/sdk v0.7.2/go.mod h1:niZxKP6w3PQdwOWnRwjop2LNd1TcdIQR+LuzIEGX4kU= github.com/pulumi/pulumi-docker/sdk/v3 v3.6.1 h1:plWLn9O6u80Vr37LoCsckyobBfcrdTU9cERor72QjqA= github.com/pulumi/pulumi-docker/sdk/v3 v3.6.1/go.mod h1:N4Yu4c49QErfucPt9Y/fGmpTryRqc0VfhyKHsGR9/g8= -github.com/pulumi/pulumi-kubernetes/sdk/v3 v3.26.0 h1:BODPAprLquqXAbyLqHhtHVsKFNZ7a/rWqQmu0/LHnAA= -github.com/pulumi/pulumi-kubernetes/sdk/v3 v3.26.0/go.mod h1:oR1GgmTus9bpa5hKRaPXD2qn87W4AjGpRVPuzcOfilM= +github.com/pulumi/pulumi-kubernetes/sdk/v3 v3.28.1 h1:khqNTTqOYyPs2vM76cREa3brIqFkEqlfgKK3JZGhVY4= +github.com/pulumi/pulumi-kubernetes/sdk/v3 v3.28.1/go.mod h1:OdSHna8qSimLhP7HpTcUwvnGzl2MoaXF3gS39pIO/Xc= github.com/pulumi/pulumi-libvirt/sdk v0.4.0 
h1:wq1Ox8FRKQ1kc2DPq3m5DGQgZEhE7kp4mtG556HxJLs= github.com/pulumi/pulumi-libvirt/sdk v0.4.0/go.mod h1:tjjyDajp6Pb1pRCdaIugknIfzxw3Prev3o/k2nade+I= -github.com/pulumi/pulumi-random/sdk/v4 v4.13.0 h1:NxjG8bh0ro9v6EdnsbIgOL++auzosgjtP0pvbprmee4= -github.com/pulumi/pulumi-random/sdk/v4 v4.13.0/go.mod h1:9R/coqYWbyLEvPJRY3/75NpQfhG81YnUkll59MBW/hg= -github.com/pulumi/pulumi/sdk/v3 v3.66.0 h1:85qz3fTvAs0J4YoOM/1I1RK/adUptA/bYmYU/v14MRk= -github.com/pulumi/pulumi/sdk/v3 v3.66.0/go.mod h1:hK2uQnf2SwwvCcaAco3l9+g5mGOkRfR7uqUaZpY/fD8= +github.com/pulumi/pulumi-random/sdk/v4 v4.13.2 h1:p4Cti4RIBKA0qi+JG/AofpcVsdyCaHd5gKMU5y/3I2Q= +github.com/pulumi/pulumi-random/sdk/v4 v4.13.2/go.mod h1:cFlJw0eQnqN+62QpITEF9M08gVyzNCeXrKRsuJptFak= +github.com/pulumi/pulumi/sdk/v3 v3.68.0 h1:JWn3DGJhzoWL8bNbUdyLSSPeKS2F9mv14/EL9QeVT3w= +github.com/pulumi/pulumi/sdk/v3 v3.68.0/go.mod h1:A/WHc5MlxU8GpX/sRmfQ9G0/Bxxl4GNdSP7TQmy4yIw= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -189,32 +190,26 @@ github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDj github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= github.com/santhosh-tekuri/jsonschema/v5 v5.3.0 h1:uIkTLo0AGRc8l7h5l9r+GcYi9qfVPt6lD4/bhmzfiKo= github.com/santhosh-tekuri/jsonschema/v5 v5.3.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/skeema/knownhosts v1.1.0 
h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0= -github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/skeema/knownhosts v1.1.1 h1:MTk78x9FPgDFVFkDLTrsnnfCJl7g1C/nnKvePgrIngE= +github.com/skeema/knownhosts v1.1.1/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/texttheater/golang-levenshtein v1.0.1 h1:+cRNoVrfiwufQPhoMzB6N0Yf/Mqajr6t1lOv8GyGE2U= github.com/texttheater/golang-levenshtein v1.0.1/go.mod h1:PYAKrbF5sAiq9wd+H82hs7gNaen0CplQ9uvm6+enD/8= github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7 h1:X9dsIWPuuEJlPX//UmRKophhOKCGXc46RVIGuttks68= @@ -228,20 +223,17 @@ github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zclconf/go-cty v1.13.2 h1:4GvrUxe/QUDYuJKAav4EYqdM47/kZa672LwmXFmEKT0= +github.com/zclconf/go-cty v1.13.2/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= github.com/zorkian/go-datadog-api v2.30.0+incompatible h1:R4ryGocppDqZZbnNc5EDR8xGWF/z/MxzWnqTUijDQes= github.com/zorkian/go-datadog-api v2.30.0+incompatible/go.mod h1:PkXwHX9CUQa/FpB9ZwAD45N1uhCW4MT/Wj7m36PbKss= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -golang.org/x/arch v0.1.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= @@ -250,7 +242,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -262,10 +253,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= @@ -281,23 +269,18 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -305,8 +288,6 @@ golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= @@ -315,7 +296,6 @@ golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= @@ -327,7 +307,6 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= @@ -335,8 +314,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230526015343-6ee61e4f9d5f h1:QNVuVEP2S7NNxLdNdOq0RiW3c9pW4gIpUUd+GAOjk1Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230526015343-6ee61e4f9d5f/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= @@ -345,7 +324,6 @@ google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cn google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -353,12 +331,10 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/zorkian/go-datadog-api.v2 v2.30.0 h1:umQdVO0Ytx+kYadhuJNjFtDgIsIEBnKrOTvNuu8ClKI= @@ -366,6 +342,5 @@ gopkg.in/zorkian/go-datadog-api.v2 v2.30.0/go.mod h1:kx0CSMRpzEZfx/nFH62GLU4stZj lukechampine.com/frand v1.4.2 h1:RzFIpOvkMXuPMBb9maa4ND4wjBn71E1Jpf8BzJHMaVw= lukechampine.com/frand v1.4.2/go.mod h1:4S/TM2ZgrKejMcKMbeLjISpJMO+/eZ1zu3vYX9dtj3s= pgregory.net/rapid v0.5.5 h1:jkgx1TjbQPD/feRoK+S/mXw9e1uj6WilpHrXJowi6oA= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= sourcegraph.com/sourcegraph/appdash v0.0.0-20211028080628-e2786a622600 h1:hfyJ5ku9yFtLVOiSxa3IN+dx5eBQT9mPmKFypAmg8XM= sourcegraph.com/sourcegraph/appdash v0.0.0-20211028080628-e2786a622600/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/test/new-e2e/ndm/snmp/snmpTestEnv.go b/test/new-e2e/ndm/snmp/snmpTestEnv.go index e8a9bc876b97d..6bdd1f11d9c9a 100644 --- a/test/new-e2e/ndm/snmp/snmpTestEnv.go +++ b/test/new-e2e/ndm/snmp/snmpTestEnv.go @@ -12,8 +12,8 @@ import ( "path" "github.com/DataDog/datadog-agent/test/new-e2e/utils/infra" - ec2vm "github.com/DataDog/test-infra-definitions/aws/scenarios/vm/ec2VM" - "github.com/DataDog/test-infra-definitions/datadog/agent/docker" + "github.com/DataDog/test-infra-definitions/components/datadog/agent/docker" + ec2vm "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2VM" "github.com/pulumi/pulumi/sdk/v3/go/auto" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" diff --git a/test/new-e2e/runner/ci-profile.go b/test/new-e2e/runner/ci-profile.go index 4ce65b8888bc8..563fd638bf541 100644 --- a/test/new-e2e/runner/ci-profile.go +++ b/test/new-e2e/runner/ci-profile.go @@ -8,10 +8,15 
@@ package runner import ( "fmt" "os" + "strings" "github.com/DataDog/datadog-agent/test/new-e2e/runner/parameters" ) +const defaultCISecretPrefix = "ci.datadog-agent." + +var defaultCIEnvs = []string{"aws/agent-qa"} + type ciProfile struct { baseProfile @@ -23,9 +28,12 @@ func NewCIProfile() (Profile, error) { if err := os.MkdirAll(workspaceFolder, 0o700); err != nil { return nil, fmt.Errorf("unable to create temporary folder at: %s, err: %w", workspaceFolder, err) } - + ciSecretPrefix := os.Getenv("CI_SECRET_PREFIX") + if len(ciSecretPrefix) == 0 { + ciSecretPrefix = defaultCISecretPrefix + } // Secret store - secretStore := parameters.NewAWSStore("ci.datadog-agent.") + secretStore := parameters.NewAWSStore(ciSecretPrefix) // Set Pulumi password passVal, err := secretStore.Get(parameters.PulumiPassword) @@ -42,8 +50,15 @@ func NewCIProfile() (Profile, error) { } store := parameters.NewEnvStore(EnvPrefix) + + ciEnvironments := defaultCIEnvs + ciEnvironmentsStr := os.Getenv("CI_ENV_NAMES") + if len(ciEnvironmentsStr) > 0 { + ciEnvironments = strings.Split(ciEnvironmentsStr, " ") + } + return ciProfile{ - baseProfile: newProfile("e2eci", []string{"aws/agent-qa"}, store, &secretStore), + baseProfile: newProfile("e2eci", ciEnvironments, store, &secretStore), ciUniqueID: pipelineID + "-" + projectID, }, nil } diff --git a/test/new-e2e/runner/configmap.go b/test/new-e2e/runner/configmap.go index bb14b838601eb..b0fb4d8113eb6 100644 --- a/test/new-e2e/runner/configmap.go +++ b/test/new-e2e/runner/configmap.go @@ -10,8 +10,8 @@ import ( "errors" "github.com/DataDog/datadog-agent/test/new-e2e/runner/parameters" - infraaws "github.com/DataDog/test-infra-definitions/aws" commonconfig "github.com/DataDog/test-infra-definitions/common/config" + infraaws "github.com/DataDog/test-infra-definitions/resources/aws" "github.com/pulumi/pulumi/sdk/v3/go/auto" ) diff --git a/test/new-e2e/runner/configmap_integration_test.go b/test/new-e2e/runner/configmap_integration_test.go index 
8a8d11fdee977..f18f7f62ecf7a 100644 --- a/test/new-e2e/runner/configmap_integration_test.go +++ b/test/new-e2e/runner/configmap_integration_test.go @@ -27,10 +27,11 @@ func Test_BuildStackParameters(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, configMap) assert.Equal(t, ConfigMap{ - "ddagent:apiKey": auto.ConfigValue{Value: "api_key", Secret: true}, - "ddagent:appKey": auto.ConfigValue{Value: "app_key", Secret: true}, - "namespace:key/foo": auto.ConfigValue{Value: "42", Secret: false}, - "ddinfra:aws/defaultKeyPairName": auto.ConfigValue{Value: "key_pair_name", Secret: false}, - "ddinfra:env": auto.ConfigValue{Value: "", Secret: false}, + "ddagent:apiKey": auto.ConfigValue{Value: "api_key", Secret: true}, + "ddagent:appKey": auto.ConfigValue{Value: "app_key", Secret: true}, + "namespace:key/foo": auto.ConfigValue{Value: "42", Secret: false}, + "ddinfra:aws/defaultKeyPairName": auto.ConfigValue{Value: "key_pair_name", Secret: false}, + "ddinfra:env": auto.ConfigValue{Value: "", Secret: false}, + "ddinfra:aws/defaultPublicKeyPath": auto.ConfigValue{Value: "public_key_path", Secret: false}, }, configMap) } diff --git a/test/new-e2e/system-probe/system-probe-test-env.go b/test/new-e2e/system-probe/system-probe-test-env.go index 31c95f9fb6929..21ea0c52c7cd4 100644 --- a/test/new-e2e/system-probe/system-probe-test-env.go +++ b/test/new-e2e/system-probe/system-probe-test-env.go @@ -16,9 +16,9 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/runner" "github.com/DataDog/datadog-agent/test/new-e2e/utils/infra" - "github.com/DataDog/test-infra-definitions/aws" - "github.com/DataDog/test-infra-definitions/aws/scenarios/microVMs/microvms" - "github.com/DataDog/test-infra-definitions/command" + "github.com/DataDog/test-infra-definitions/components/command" + "github.com/DataDog/test-infra-definitions/resources/aws" + "github.com/DataDog/test-infra-definitions/scenarios/aws/microVMs/microvms" "github.com/pulumi/pulumi-command/sdk/go/command/remote" 
"github.com/pulumi/pulumi/sdk/v3/go/auto" @@ -105,9 +105,14 @@ func NewTestEnv(name, securityGroups, subnets, x86InstanceType, armInstanceType var depends []pulumi.Resource osCommand := command.NewUnixOSCommand() for _, instance := range scenarioDone.Instances { - remoteRunner, err := command.NewRunner(*awsEnvironment.CommonEnvironment, "remote-runner-"+instance.Arch, instance.Connection, func(r *command.Runner) (*remote.Command, error) { - return command.WaitForCloudInit(r) - }, osCommand) + remoteRunner, err := command.NewRunner(*awsEnvironment.CommonEnvironment, command.RunnerArgs{ + ConnectionName: "remote-runner-" + instance.Arch, + Connection: instance.Connection, + ReadyFunc: func(r *command.Runner) (*remote.Command, error) { + return command.WaitForCloudInit(r) + }, + OSCommand: osCommand, + }) // if shutdown period specified then register a cron job // to automatically shutdown the ec2 instance after desired diff --git a/test/new-e2e/utils/e2e/client/agent.go b/test/new-e2e/utils/e2e/client/agent.go index d23b146218e7a..27ec75a86238a 100644 --- a/test/new-e2e/utils/e2e/client/agent.go +++ b/test/new-e2e/utils/e2e/client/agent.go @@ -11,21 +11,25 @@ import ( "testing" "time" - "github.com/DataDog/test-infra-definitions/datadog/agent" + "github.com/DataDog/test-infra-definitions/components/datadog/agent" + "github.com/DataDog/test-infra-definitions/components/os" "github.com/cenkalti/backoff" ) var _ clientService[agent.ClientData] = (*Agent)(nil) -// A client Agent that is connected to an agent.Installer defined in test-infra-definition. +// A client Agent that is connected to an [agent.Installer]. 
+// +// [agent.Installer]: https://pkg.go.dev/github.com/DataDog/test-infra-definitions@main/components/datadog/agent#Installer type Agent struct { *UpResultDeserializer[agent.ClientData] *vmClient + os os.OS } // Create a new instance of Agent func NewAgent(installer *agent.Installer) *Agent { - agentInstance := &Agent{} + agentInstance := &Agent{os: installer.VM().GetOS()} agentInstance.UpResultDeserializer = NewUpResultDeserializer[agent.ClientData](installer, agentInstance) return agentInstance } @@ -38,11 +42,15 @@ func (agent *Agent) initService(t *testing.T, data *agent.ClientData) error { } func (agent *Agent) Version() string { - return agent.vmClient.Execute("datadog-agent version") + return agent.vmClient.Execute(agent.GetCommand("version")) +} + +func (agent *Agent) GetCommand(parameters string) string { + return agent.os.GetRunAgentCmd(parameters) } func (agent *Agent) Config() string { - return agent.vmClient.Execute("sudo datadog-agent config") + return agent.vmClient.Execute(agent.GetCommand("config")) } type Status struct { @@ -59,25 +67,25 @@ func (s *Status) isReady() (bool, error) { } func (agent *Agent) Status() *Status { - return newStatus(agent.vmClient.Execute("sudo datadog-agent status")) + return newStatus(agent.vmClient.Execute(agent.GetCommand("status"))) } -// IsReady runs status command and returns true if the agent is ready -// Use this to wait for agent to be ready before running any command +// IsReady runs status command and returns true if the agent is ready. +// Use this to wait for agent to be ready before running any command. func (a *Agent) IsReady() (bool, error) { return a.Status().isReady() } -// WaitForReady blocks up for one minute waiting for agent to be ready -// Retries every 100 ms up to one minute -// Returns error on failure +// WaitForReady blocks up to one minute waiting for agent to be ready. +// Retries every 100 ms up to one minute. +// Returns error on failure. 
func (a *Agent) WaitForReady() error { return a.WaitForReadyTimeout(1 * time.Minute) } -// WaitForReady blocks up for timeout waiting for agent to be ready -// Retries every 100 ms up to timeout -// Returns error on failure +// WaitForReady blocks up to timeout waiting for agent to be ready. +// Retries every 100 ms up to timeout. +// Returns error on failure. func (a *Agent) WaitForReadyTimeout(timeout time.Duration) error { interval := 100 * time.Millisecond maxRetries := timeout.Milliseconds() / interval.Milliseconds() diff --git a/test/new-e2e/utils/e2e/client/agent_test.go b/test/new-e2e/utils/e2e/client/agent_test.go index cc41fe7d37485..46c91caeae0ca 100644 --- a/test/new-e2e/utils/e2e/client/agent_test.go +++ b/test/new-e2e/utils/e2e/client/agent_test.go @@ -76,9 +76,8 @@ Agent (v7.44.0-rc-7) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - s := NewStatus(tt.payload) - - isReady, err := s.IsReady() + s := newStatus(tt.payload) + isReady, err := s.isReady() require.NoError(t, err) assert.Equal(t, tt.expectedReady, isReady) }) diff --git a/test/new-e2e/utils/e2e/client/fakeintake.go b/test/new-e2e/utils/e2e/client/fakeintake.go new file mode 100644 index 0000000000000..89e6b44f44520 --- /dev/null +++ b/test/new-e2e/utils/e2e/client/fakeintake.go @@ -0,0 +1,34 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package client + +import ( + "testing" + + fakeintake "github.com/DataDog/datadog-agent/test/fakeintake/client" + infraFakeintake "github.com/DataDog/test-infra-definitions/components/datadog/fakeintake" +) + +var _ clientService[infraFakeintake.ClientData] = (*Fakeintake)(nil) + +// A client fakeintake that is connected to a fakeintake ECS task defined in test-infra-definition. 
+type Fakeintake struct { + *UpResultDeserializer[infraFakeintake.ClientData] + *fakeintake.Client +} + +// NewFakeintake creates a new instance of Fakeintake +func NewFakeintake(exporter *infraFakeintake.ConnectionExporter) *Fakeintake { + fi := &Fakeintake{} + fi.UpResultDeserializer = NewUpResultDeserializer[infraFakeintake.ClientData](exporter, fi) + return fi +} + +//lint:ignore U1000 Ignore unused function as this function is called using reflection +func (fi *Fakeintake) initService(t *testing.T, data *infraFakeintake.ClientData) error { + fi.Client = fakeintake.NewClient("http://" + data.URL) + return nil +} diff --git a/test/new-e2e/utils/e2e/client/stack_initializer_test.go b/test/new-e2e/utils/e2e/client/stack_initializer_test.go index 16186ce4b3656..5e6affa475c39 100644 --- a/test/new-e2e/utils/e2e/client/stack_initializer_test.go +++ b/test/new-e2e/utils/e2e/client/stack_initializer_test.go @@ -19,6 +19,7 @@ func TestValidEnv(t *testing.T) { require.NoError(t, CheckEnvStructValid[ValidEnv]()) } +// nolint:unused type UnexportedFieldEnv struct { vm *VM } diff --git a/test/new-e2e/utils/e2e/client/up_result_deserializer.go b/test/new-e2e/utils/e2e/client/up_result_deserializer.go index e3ab6cae69c39..c300f42ca752b 100644 --- a/test/new-e2e/utils/e2e/client/up_result_deserializer.go +++ b/test/new-e2e/utils/e2e/client/up_result_deserializer.go @@ -22,7 +22,7 @@ type clientServiceInitializer[T any] interface { } // UpResultDeserializer is an helper to build a new type that can be used in an environment. -// It is designed to be used as an embeded field. +// It is designed to be used as an embedded field. // See VM type in this package for an example of usage.
type UpResultDeserializer[T any] struct { initializer clientServiceInitializer[T] diff --git a/test/new-e2e/utils/e2e/client/vm.go b/test/new-e2e/utils/e2e/client/vm.go index f8cb6d9794a7d..9879e888bce32 100644 --- a/test/new-e2e/utils/e2e/client/vm.go +++ b/test/new-e2e/utils/e2e/client/vm.go @@ -3,23 +3,24 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. +// Package client contains clients used to communicate with the remote service package client import ( "testing" - commonvm "github.com/DataDog/test-infra-definitions/common/vm" + commonvm "github.com/DataDog/test-infra-definitions/components/vm" ) var _ clientService[commonvm.ClientData] = (*VM)(nil) -// A client VM that is connected to a VM defined in test-infra-definition. +// VM is a client VM that is connected to a VM defined in test-infra-definition. type VM struct { *UpResultDeserializer[commonvm.ClientData] *vmClient } -// Create a new instance of VM +// NewVM creates a new instance of VM func NewVM(infraVM commonvm.VM) *VM { vm := &VM{} vm.UpResultDeserializer = NewUpResultDeserializer[commonvm.ClientData](infraVM, vm) diff --git a/test/new-e2e/utils/e2e/client/vm_client.go b/test/new-e2e/utils/e2e/client/vm_client.go index 5110f8de5614d..ae58f159bb2d4 100644 --- a/test/new-e2e/utils/e2e/client/vm_client.go +++ b/test/new-e2e/utils/e2e/client/vm_client.go @@ -35,7 +35,11 @@ func newVMClient(t *testing.T, sshKey string, connection *utils.Connection) (*vm // ExecuteWithError executes a command and returns an error if any. func (vmClient *vmClient) ExecuteWithError(command string) (string, error) { - return clients.ExecuteCommand(vmClient.client, command) + output, err := clients.ExecuteCommand(vmClient.client, command) + if err != nil { + return "", fmt.Errorf("%v: %v", output, err) + } + return output, nil } // Execute execute a command and asserts there is no error. 
diff --git a/test/new-e2e/utils/e2e/e2e.go b/test/new-e2e/utils/e2e/e2e.go index 33e4efb65c9b0..480549bccc996 100644 --- a/test/new-e2e/utils/e2e/e2e.go +++ b/test/new-e2e/utils/e2e/e2e.go @@ -3,8 +3,223 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package e2e provides tools to manage environments and run E2E tests. -// See [Suite] for an example of the usage. +// Package e2e provides the API to manage environments and organize E2E tests. +// +// Here is a small example of E2E tests. +// E2E tests use [testify Suite] and it is strongly recommended to read the documentation of +// [testify Suite] if you are not familiar with it. +// +// import ( +// "testing" +// +// "github.com/DataDog/datadog-agent/test/new-e2e/utils/e2e" +// ) +// +// type vmSuite struct { +// e2e.Suite[e2e.VMEnv] +// } +// +// func TestVMSuite(t *testing.T) { +// e2e.Run(t, &vmSuite{}, e2e.EC2VMStackDef()) +// } +// +// func (v *vmSuite) TestBasicVM() { +// v.Env().VM.Execute("ls") +// } +// +// To write an E2E test: +// +// 1. Define your own [suite] type with the embedded [e2e.Suite] struct. +// +// type vmSuite struct { +// e2e.Suite[e2e.VMEnv] +// } +// +// [e2e.VMEnv] defines the components available in your stack. See "Using existing stack definition" section for more information. +// +// 2. Write a regular Go test function that runs the test suite using [e2e.Run]. +// +// func TestVMSuite(t *testing.T) { +// e2e.Run(t, &vmSuite{}, e2e.EC2VMStackDef()) +// } +// +// The first argument of [e2e.Run] is an instance of type [*testing.T]. +// +// The second argument is a pointer to an empty instance of the previous defined structure (&vmSuite{} in our example) +// +// The third parameter defines the environment. See "Using existing stack definition" section for more information about a environment definition. +// +// 3. 
Write a test function +// +// func (v *vmSuite) TestBasicVM() { +// v.Env().VM.Execute("ls") +// } +// +// [e2e.Suite.Env] gives access to the components in your environment. +// +// Depending on your stack definition, [e2e.Suite.Env] can provide the following objects: +// - [client.VM]: A virtual machine where you can execute commands. +// - [client.Agent]: A struct that provides methods to run datadog agent commands. +// - [client.Fakeintake]: A struct that provides methods to run queries to a fake instance of Datadog intake. +// +// # Using an existing stack definition +// +// The stack definition defines the components available in your environment. +// +// type vmSuite struct { +// e2e.Suite[e2e.VMEnv] +// } +// +// func TestVMSuite(t *testing.T) { +// e2e.Run(t, &vmSuite{}, e2e.EC2VMStackDef()) +// } +// +// In this example, the components available are defined by the struct [e2e.VMEnv] which contains a virtual machine. +// The generic type of [e2e.Suite] must match the type of the stack definition. +// In our example, [e2e.EC2VMStackDef] returns an instance of [*e2e.StackDefinition][[e2e.VMEnv]]. +// +// The following default stack definitions are provided: +// - [e2e.EC2VMStackDef] creates an environment with a virtual machine. See [e2e.EC2VMStackDef] for more information about the supported options. +// - [e2e.AgentStackDef] creates an environment with an Agent installed on a virtual machine. See [e2e.AgentStackDef] for more information about the supported options. +// +// # Defining your stack definition +// +// In some special cases, you have to define a custom environment. +// Here is an example of an environment with Docker installed on a virtual machine. 
+// +// type dockerSuite struct { +// e2e.Suite[e2e.VMEnv] +// } +// +// func TestDockerSuite(t *testing.T) { +// e2e.Run(t, &dockerSuite{}, e2e.EnvFactoryStackDef(dockerEnvFactory)) +// } +// +// func dockerEnvFactory(ctx *pulumi.Context) (*e2e.VMEnv, error) { +// vm, err := ec2vm.NewUnixEc2VM(ctx) +// if err != nil { +// return nil, err +// } +// +// _, err = docker.NewAgentDockerInstaller(vm.UnixVM, docker.WithAgent(docker.WithAgentImageTag("7.42.0"))) +// +// if err != nil { +// return nil, err +// } +// +// return &e2e.VMEnv{ +// VM: client.NewVM(vm), +// }, nil +// } +// +// func (docker *dockerSuite) TestDocker() { +// docker.Env().VM.Execute("docker container ls") +// } +// +// [e2e.EnvFactoryStackDef] is used to define a custom environment. +// Here is a non exhaustive list of components that can be used to create a custom environment: +// - [EC2 VM]: Provide methods to create a virtual machine on EC2. +// - [Agent]: Provide methods to install the Agent on a virtual machine +// - [File Manager]: Provide methods to manipulate files and folders +// +// # Organizing your tests +// +// The execution order for tests in [testify Suite] is IMPLEMENTATION SPECIFIC +// UNLIKE REGULAR GO TESTS. +// +// # Having a single environment +// +// In the simple case, there is a single environment and each test checks one specific thing. +// +// type singleEnvSuite struct { +// e2e.Suite[e2e.AgentEnv] +// } +// +// func TestSingleEnvSuite(t *testing.T) { +// e2e.Run(t, &singleEnvSuite{}, e2e.AgentStackDef(nil)) +// } +// +// func (suite *singleEnvSuite) Test1() { +// // Check feature 1 +// } +// +// func (suite *singleEnvSuite) Test2() { +// // Check feature 2 +// } +// +// func (suite *singleEnvSuite) Test3() { +// // Check feature 3 +// } +// +// # Having different environments +// +// In this scenario, the environment is different for each test (or for most of them). +// [e2e.Suite.UpdateEnv] is used to update the environment. 
+// Keep in mind that using [e2e.Suite.UpdateEnv] to update virtual machine settings can destroy +// the current virtual machine and create a new one when updating the operating system for example. +// +// Note: Calling twice [e2e.Suite.UpdateEnv] with the same argument does nothing. +// +// type multipleEnvSuite struct { +// e2e.Suite[e2e.AgentEnv] +// } +// +// func TestMultipleEnvSuite(t *testing.T) { +// e2e.Run(t, &multipleEnvSuite{}, e2e.AgentStackDef(nil)) +// } +// +// func (suite *multipleEnvSuite) TestLogDebug() { +// suite.UpdateEnv(e2e.AgentStackDef(nil, agent.WithAgentConfig("log_level: debug"))) +// config := suite.Env().Agent.Config() +// require.Contains(suite.T(), config, "log_level: debug") +// } +// +// func (suite *multipleEnvSuite) TestLogInfo() { +// suite.UpdateEnv(e2e.AgentStackDef(nil, agent.WithAgentConfig("log_level: info"))) +// config := suite.Env().Agent.Config() +// require.Contains(suite.T(), config, "log_level: info") +// } +// +// # Having a few environments +// +// You may sometimes have a few environments but several tests for each of them. +// You can still use [e2e.Suite.UpdateEnv] as explained in the previous section but using +// [Subtests] is an alternative solution.
+// +// type subTestSuite struct { +// e2e.Suite[e2e.AgentEnv] +// } +// +// func TestSubTestSuite(t *testing.T) { +// e2e.Run(t, &subTestSuite{}, e2e.AgentStackDef(nil)) +// } +// +// func (suite *subTestSuite) TestLogDebug() { +// suite.UpdateEnv(e2e.AgentStackDef(nil, agent.WithAgentConfig("log_level: debug"))) +// suite.T().Run("MySubTest1", func(t *testing.T) { +// // Sub test 1 +// }) +// suite.T().Run("MySubTest2", func(t *testing.T) { +// // Sub test 2 +// }) +// } +// +// func (suite *subTestSuite) TestLogInfo() { +// suite.UpdateEnv(e2e.AgentStackDef(nil, agent.WithAgentConfig("log_level: info"))) +// suite.T().Run("MySubTest1", func(t *testing.T) { +// // Sub test 1 +// }) +// suite.T().Run("MySubTest2", func(t *testing.T) { +// // Sub test 2 +// }) +// } +// +// [Subtests]: https://go.dev/blog/subtests +// [suite]: https://pkg.go.dev/github.com/stretchr/testify/suite +// [testify Suite]: https://pkg.go.dev/github.com/stretchr/testify/suite +// [File Manager]: https://pkg.go.dev/github.com/DataDog/test-infra-definitions@main/components/command#FileManager +// [EC2 VM]: https://pkg.go.dev/github.com/DataDog/test-infra-definitions@main/scenarios/aws/vm/ec2VM +// [Agent]: https://pkg.go.dev/github.com/DataDog/test-infra-definitions@main/components/datadog/agent#Installer package e2e import ( @@ -55,6 +270,21 @@ type suiteConstraint[Env any] interface { initSuite(stackName string, stackDef *StackDefinition[Env], options ...func(*Suite[Env])) } +// Run runs the tests defined in e2eSuite +// +// t is an instance of type [*testing.T]. +// +// e2eSuite is a pointer to a structure with a [e2e.Suite] embbeded struct. +// +// stackDef defines the stack definition. +// +// options is an optional list of options like [DevMode], [SkipDeleteOnFailure] or [WithStackName]. +// +// type vmSuite struct { +// e2e.Suite[e2e.VMEnv] +// } +// // ... 
+// e2e.Run(t, &vmSuite{}, e2e.EC2VMStackDef()) func Run[Env any, T suiteConstraint[Env]](t *testing.T, e2eSuite T, stackDef *StackDefinition[Env], options ...func(*Suite[Env])) { suiteType := reflect.TypeOf(e2eSuite).Elem() name := suiteType.Name() @@ -79,20 +309,24 @@ func (suite *Suite[Env]) initSuite(stackName string, stackDef *StackDefinition[E } } -// WithStackName overrides the stack name. -// This function is useful only when using e2e.Run. +// WithStackName overrides the default stack name. +// This function is useful only when using [Run]. func WithStackName[Env any](stackName string) func(*Suite[Env]) { return func(suite *Suite[Env]) { suite.stackName = stackName } } +// DevMode enables dev mode. +// Dev mode doesn't destroy the environment when the test finishes which can +// be useful when writing a new E2E test. func DevMode[Env any]() func(*Suite[Env]) { return func(suite *Suite[Env]) { suite.devMode = true } } +// SkipDeleteOnFailure doesn't destroy the environment when a test fails. func SkipDeleteOnFailure[Env any]() func(*Suite[Env]) { return func(suite *Suite[Env]) { suite.skipDeleteOnFailure = true @@ -101,10 +335,10 @@ func SkipDeleteOnFailure[Env any]() func(*Suite[Env]) { // Env returns the current environment. // In order to improve the efficiency, this function behaves as follow: -// - It creates the default environment if no environment exists. It happens only during the first call of the test suite. -// - It restores the default environment if UpdateEnv was not already called during this test. -// This avoid having to restore the default environment for each test even if UpdateEnv immedialy -// overrides this environment. +// - It creates the default environment if no environment exists. +// - It restores the default environment if [e2e.Suite.UpdateEnv] was not already called during this test. +// This avoids having to restore the default environment for each test even if [suite.UpdateEnv] immediately +// overrides the environment.
func (suite *Suite[Env]) Env() *Env { if suite.env == nil || !suite.isUpdateEnvCalledInThisTest { suite.UpdateEnv(suite.defaultStackDef) @@ -112,10 +346,22 @@ func (suite *Suite[Env]) Env() *Env { return suite.env } +// BeforeTest is executed right before the test starts and receives the suite and test names as input. +// This function is called by [testify Suite]. +// +// If you override BeforeTest in your custom test suite type, the function must call [e2e.Suite.BeforeTest]. +// +// [testify Suite]: https://pkg.go.dev/github.com/stretchr/testify/suite func (suite *Suite[Env]) BeforeTest(suiteName, testName string) { suite.isUpdateEnvCalledInThisTest = false } +// AfterTest is executed right after the test finishes and receives the suite and test names as input. +// This function is called by [testify Suite]. +// +// If you override AfterTest in your custom test suite type, the function must call [e2e.Suite.AfterTest]. +// +// [testify Suite]: https://pkg.go.dev/github.com/stretchr/testify/suite func (suite *Suite[Env]) AfterTest(suiteName, testName string) { if suite.T().Failed() && suite.firstFailTest == "" { // As far as I know, there is no way to prevent other tests from being @@ -130,6 +376,10 @@ func (suite *Suite[Env]) AfterTest(suiteName, testName string) { // SetupSuite method will run before the tests in the suite are run. // This function is called by [testify Suite]. +// +// If you override SetupSuite in your custom test suite type, the function must call [e2e.Suite.SetupSuite]. +// +// [testify Suite]: https://pkg.go.dev/github.com/stretchr/testify/suite func (suite *Suite[Env]) SetupSuite() { skipDelete, _ := runner.GetProfile().ParamStore().GetBoolWithDefault(parameters.SkipDeleteOnFailure, false) if skipDelete { @@ -145,6 +395,8 @@ func (suite *Suite[Env]) SetupSuite() { // TearDownTestSuite run after all the tests in the suite have been run. // This function is called by [testify Suite]. 
// +// If you override TearDownSuite in your custom test suite type, the function must call [e2e.Suite.TearDownSuite]. +// // [testify Suite]: https://pkg.go.dev/github.com/stretchr/testify/suite func (suite *Suite[Env]) TearDownSuite() { if runner.GetProfile().AllowDevMode() && suite.devMode { @@ -184,6 +436,9 @@ func createEnv[Env any](suite *Suite[Env], stackDef *StackDefinition[Env]) (*Env return env, stackOutput, err } +// UpdateEnv updates the environment. +// This affects only the test that calls this function. +// Test functions that don't call UpdateEnv have the environment defined by [e2e.Run]. func (suite *Suite[Env]) UpdateEnv(stackDef *StackDefinition[Env]) { if stackDef != suite.currentStackDef { if (suite.firstFailTest != "" || suite.T().Failed()) && suite.skipDeleteOnFailure { diff --git a/test/new-e2e/utils/e2e/stack_definition.go b/test/new-e2e/utils/e2e/stack_definition.go index 0fa48cd1d45d0..9d88fa5da6200 100644 --- a/test/new-e2e/utils/e2e/stack_definition.go +++ b/test/new-e2e/utils/e2e/stack_definition.go @@ -8,9 +8,10 @@ package e2e import ( "github.com/DataDog/datadog-agent/test/new-e2e/runner" "github.com/DataDog/datadog-agent/test/new-e2e/utils/e2e/client" - ec2vm "github.com/DataDog/test-infra-definitions/aws/scenarios/vm/ec2VM" - "github.com/DataDog/test-infra-definitions/common/vm" - "github.com/DataDog/test-infra-definitions/datadog/agent" + "github.com/DataDog/test-infra-definitions/components/datadog/agent" + "github.com/DataDog/test-infra-definitions/components/vm" + "github.com/DataDog/test-infra-definitions/scenarios/aws/ecs" + ec2vm "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2VM" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) @@ -23,6 +24,7 @@ func NewStackDef[Env any](envFactory func(ctx *pulumi.Context) (*Env, error), co return &StackDefinition[Env]{envFactory: envFactory, configMap: configMap} } +// EnvFactoryStackDef creates a custom stack definition func EnvFactoryStackDef[Env any](envFactory func(ctx 
*pulumi.Context) (*Env, error)) *StackDefinition[Env] { return NewStackDef(envFactory, runner.ConfigMap{}) } @@ -31,6 +33,10 @@ type VMEnv struct { VM *client.VM } +// EC2VMStackDef creates a stack definition containing a virtual machine. +// See [ec2vm.Params] for available options. +// +// [ec2vm.Params]: https://pkg.go.dev/github.com/DataDog/test-infra-definitions@main/scenarios/aws/vm/ec2VM#Params func EC2VMStackDef(options ...func(*ec2vm.Params) error) *StackDefinition[VMEnv] { noop := func(vm.VM) (VMEnv, error) { return VMEnv{}, nil } return CustomEC2VMStackDef(noop, options...) @@ -53,12 +59,21 @@ func CustomEC2VMStackDef[T any](fct func(vm.VM) (T, error), options ...func(*ec2 } type AgentEnv struct { - VM *client.VM - Agent *client.Agent + VM *client.VM + Agent *client.Agent + Fakeintake *client.Fakeintake } type Ec2VMOption = func(*ec2vm.Params) error +// AgentStackDef creates a stack definition containing a virtual machine and an Agent. +// +// See [ec2vm.Params] for available options for vmParams. +// +// See [agent.Params] for available options for agentParams. +// +// [ec2vm.Params]: https://pkg.go.dev/github.com/DataDog/test-infra-definitions@main/scenarios/aws/vm/ec2VM#Params +// [agent.Params]: https://pkg.go.dev/github.com/DataDog/test-infra-definitions@main/components/datadog/agent#Params func AgentStackDef(vmParams []Ec2VMOption, agentParams ...func(*agent.Params) error) *StackDefinition[AgentEnv] { return EnvFactoryStackDef( func(ctx *pulumi.Context) (*AgentEnv, error) { @@ -67,13 +82,20 @@ func AgentStackDef(vmParams []Ec2VMOption, agentParams ...func(*agent.Params) er return nil, err } + fakeintakeExporter, err := ecs.NewEcsFakeintake(vm.Infra) + if err != nil { + return nil, err + } + + agentParams = append(agentParams, agent.WithFakeintake(fakeintakeExporter)) installer, err := agent.NewInstaller(vm, agentParams...) 
if err != nil { return nil, err } return &AgentEnv{ - VM: client.NewVM(vm), - Agent: client.NewAgent(installer), + VM: client.NewVM(vm), + Agent: client.NewAgent(installer), + Fakeintake: client.NewFakeintake(fakeintakeExporter), }, nil }, ) diff --git a/tools/windows/DatadogAgentInstaller/CustomActions.Tests/UserCustomActions/UserCustomActionsDomainControllerTests.cs b/tools/windows/DatadogAgentInstaller/CustomActions.Tests/UserCustomActions/UserCustomActionsDomainControllerTests.cs index ac214ca6e68e9..218a3861c193e 100644 --- a/tools/windows/DatadogAgentInstaller/CustomActions.Tests/UserCustomActions/UserCustomActionsDomainControllerTests.cs +++ b/tools/windows/DatadogAgentInstaller/CustomActions.Tests/UserCustomActions/UserCustomActionsDomainControllerTests.cs @@ -22,8 +22,33 @@ public void ProcessDdAgentUserCredentials_Fails_With_No_Credentials_On_DomainCon .Be(ActionResult.Failure); Test.Properties.Should() - .OnlyContain(kvp => (kvp.Key == "DDAGENTUSER_FOUND" && kvp.Value == "false") || - (kvp.Key == "DDAGENTUSER_SID" && string.IsNullOrEmpty(kvp.Value))); + .BeEmpty(); + } + + [Theory] + [AutoData] + public void ProcessDdAgentUserCredentials_Succeeds_With_Creating_DomainUser_On_Domain_Controllers( + string ddAgentUserName, + string ddAgentUserPassword) + { + Test.Session + .Setup(session => session["DDAGENTUSER_NAME"]).Returns($"{Domain}\\{ddAgentUserName}"); + Test.Session + .Setup(session => session["DDAGENTUSER_PASSWORD"]).Returns(ddAgentUserPassword); + + Test.Create() + .ProcessDdAgentUserCredentials() + .Should() + .Be(ActionResult.Success); + + Test.Properties.Should() + .Contain("DDAGENTUSER_FOUND", "false").And + .Contain(kvp => kvp.Key == "DDAGENTUSER_SID" && string.IsNullOrEmpty(kvp.Value)).And + .Contain("DDAGENTUSER_PROCESSED_NAME", ddAgentUserName).And + .Contain("DDAGENTUSER_PROCESSED_DOMAIN", Domain).And + .Contain("DDAGENTUSER_PROCESSED_FQ_NAME", $"{Domain}\\{ddAgentUserName}").And + .Contain(kvp => kvp.Key == "DDAGENTUSER_RESET_PASSWORD" && 
string.IsNullOrEmpty(kvp.Value)).And + .Contain(kvp => kvp.Key == "DDAGENTUSER_PROCESSED_PASSWORD" && kvp.Value == ddAgentUserPassword); } [Theory] @@ -158,7 +183,7 @@ string ddAgentUserName .Should() .Be(ActionResult.Failure); - // The install will proceed with the default `ddagentuser` in the machine domain + // services don't exist so password is required Test.Properties.Should() .OnlyContain(kvp => (kvp.Key == "DDAGENTUSER_FOUND" && kvp.Value == "true") || (kvp.Key == "DDAGENTUSER_SID" && !string.IsNullOrEmpty(kvp.Value))); @@ -180,30 +205,9 @@ public void ProcessDdAgentUserCredentials_Fails_With_No_Credentials_But_Services .Should() .Be(ActionResult.Failure); - // The install will proceed with the default `ddagentuser` in the machine domain - Test.Properties.Should() - .OnlyContain(kvp => (kvp.Key == "DDAGENTUSER_FOUND" && kvp.Value == "false") || - (kvp.Key == "DDAGENTUSER_SID" && string.IsNullOrEmpty(kvp.Value))); - } - - [Theory] - [AutoData] - public void ProcessDdAgentUserCredentials_Fails_With_gMsaAccount_Missing_Domain_Part_On_DomainController( - string ddAgentUserName) - { - Test.WithManagedServiceAccount(ddAgentUserName); - - Test.Session - .Setup(session => session["DDAGENTUSER_NAME"]).Returns($"\\{ddAgentUserName}"); - - Test.Create() - .ProcessDdAgentUserCredentials() - .Should() - .Be(ActionResult.Failure); - + // Domain controller requires username be present Test.Properties.Should() - .OnlyContain(kvp => (kvp.Key == "DDAGENTUSER_FOUND" && kvp.Value == "false") || - (kvp.Key == "DDAGENTUSER_SID" && string.IsNullOrEmpty(kvp.Value))); + .BeEmpty(); } } } diff --git a/tools/windows/DatadogAgentInstaller/CustomActions/UserCustomActions.cs b/tools/windows/DatadogAgentInstaller/CustomActions/UserCustomActions.cs index 6c51ba45efb77..7f7c217908cb1 100644 --- a/tools/windows/DatadogAgentInstaller/CustomActions/UserCustomActions.cs +++ b/tools/windows/DatadogAgentInstaller/CustomActions/UserCustomActions.cs @@ -267,6 +267,13 @@ public ActionResult 
ProcessDdAgentUserCredentials(bool calledFromUIControl = fal if (string.IsNullOrEmpty(ddAgentUserName)) { + if (isDomainController) + { + // require user to provide a username on domain controllers so that the customer is explicit + // about the username/password that will be created on their domain if it does not exist. + errorDialogMessage = "A username was not provided. A username is required when installing on Domain Controllers."; + throw new InvalidOperationException(errorDialogMessage); + } // Creds are not in registry and user did not pass a value, use default account name ddAgentUserName = $"{GetDefaultDomainPart()}\\ddagentuser"; _session.Log($"No creds provided, using default {ddAgentUserName}"); @@ -320,12 +327,6 @@ public ActionResult ProcessDdAgentUserCredentials(bool calledFromUIControl = fal _session["DDAGENTUSER_SID"] = null; _session.Log($"User {ddAgentUserName} doesn't exist."); - if (isDomainController) - { - errorDialogMessage = "The account does not exist. The account must already exist when installing on Domain Controllers."; - throw new InvalidOperationException(errorDialogMessage); - } - ParseUserName(ddAgentUserName, out userName, out domain); } @@ -346,6 +347,7 @@ public ActionResult ProcessDdAgentUserCredentials(bool calledFromUIControl = fal // We are trying to create a user in a domain on a non-domain controller. // This must run *after* checking that the domain is not empty. if (!userFound && + !isDomainController && domain != Environment.MachineName) { errorDialogMessage = "The account does not exist. 
Domain accounts must already exist when installing on Domain Clients."; @@ -360,6 +362,15 @@ public ActionResult ProcessDdAgentUserCredentials(bool calledFromUIControl = fal _session["DDAGENTUSER_PROCESSED_FQ_NAME"] = $"{domain}\\{userName}"; _session["DDAGENTUSER_RESET_PASSWORD"] = null; + if (!userFound && + isDomainController && + string.IsNullOrEmpty(ddAgentUserPassword)) + { + // require user to provide a password on domain controllers so that the customer is explicit + // about the username/password that will be created on their domain if it does not exist. + errorDialogMessage = "A password was not provided. A password is required when installing on Domain Controllers."; + throw new InvalidOperationException(errorDialogMessage); + } if (!isServiceAccount && !isDomainAccount && string.IsNullOrEmpty(ddAgentUserPassword)) diff --git a/tools/windows/DatadogAgentInstaller/WixSetup.Tests/AgentVersionTests.cs b/tools/windows/DatadogAgentInstaller/WixSetup.Tests/AgentVersionTests.cs index 4a4ec55911988..f7342bd3a0c39 100644 --- a/tools/windows/DatadogAgentInstaller/WixSetup.Tests/AgentVersionTests.cs +++ b/tools/windows/DatadogAgentInstaller/WixSetup.Tests/AgentVersionTests.cs @@ -32,7 +32,7 @@ public void Should_Parse_Stable_Version_OmnibusFormat_Correctly() [Fact] public void Should_Parse_Nightly_Version_OmnibusFormat_Correctly() { - // Output of inv -e agent.version --omnibus-format on a nightly + // Output of inv agent.version --omnibus-format on a nightly var packageVersion = "7.40.0~rc.2+git.309.1240df2"; var version = new Datadog.AgentVersion(packageVersion); version.PackageVersion.Should().Be(packageVersion); @@ -45,7 +45,7 @@ public void Should_Parse_Nightly_Version_OmnibusFormat_Correctly() [Fact] public void Should_Parse_Nightly_Version_UrlSafe_Correctly() { - // Output of inv -e agent.version --url-safe on an RC + // Output of inv agent.version --url-safe on an RC var packageVersion = "7.43.1-rc.3.git.485.14b9337"; var version = new 
Datadog.AgentVersion(packageVersion); version.PackageVersion.Should().Be(packageVersion); diff --git a/tools/windows/DatadogAgentInstaller/WixSetup/Datadog/AgentInstaller.cs b/tools/windows/DatadogAgentInstaller/WixSetup/Datadog/AgentInstaller.cs index 56a6843f13f6a..3fe061497f003 100644 --- a/tools/windows/DatadogAgentInstaller/WixSetup/Datadog/AgentInstaller.cs +++ b/tools/windows/DatadogAgentInstaller/WixSetup/Datadog/AgentInstaller.cs @@ -243,6 +243,20 @@ public Project ConfigureProject() .FindAll("Feature") .First(x => x.HasAttribute("Id", value => value == "MainApplication")) .AddElement("MergeRef", "Id=ddnpminstall"); + // Conditionally include the APM injection MSM while it is in active development to make it easier + // to build/ship without it. + if (!string.IsNullOrEmpty(Environment.GetEnvironmentVariable("WINDOWS_APMINJECT_MODULE"))) + { + document + .FindAll("Directory") + .First(x => x.HasAttribute("Id", value => value == "AGENT")) + .AddElement("Merge", + $"Id=ddapminstall; SourceFile={BinSource}\\agent\\ddapminstall.msm; DiskId=1; Language=1033"); + document + .FindAll("Feature") + .First(x => x.HasAttribute("Id", value => value == "MainApplication")) + .AddElement("MergeRef", "Id=ddapminstall"); + } }; project.WixSourceFormated += (ref string content) => WixSourceFormated?.Invoke(content); project.WixSourceSaved += name => WixSourceSaved?.Invoke(name);