diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 61f8c6b9e13..a4900deb95c 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -92,7 +92,7 @@ pipeline { axis { name 'PLATFORM' // Orka workers are not healthy (memory and connectivity issues) - values 'ubuntu-20.04 && immutable', 'aws && aarch64', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable' //, 'macos12 && x86_64' + values 'ubuntu-20.04 && immutable', 'aws && aarch64 && gobld/diskSizeGb:200', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable' //, 'macos12 && x86_64' } } stages { diff --git a/.ci/bump-go-release-version.sh b/.ci/bump-go-release-version.sh index c5541f6e644..c7d262d328f 100755 --- a/.ci/bump-go-release-version.sh +++ b/.ci/bump-go-release-version.sh @@ -24,6 +24,9 @@ echo "Update go version ${GO_RELEASE_VERSION}" echo "${GO_RELEASE_VERSION}" > .go-version git add .go-version +${SED} -E -e "s#(go:) \"[0-9]+\.[0-9]+\.[0-9]+\"#\1 \"${GO_RELEASE_VERSION}\"#g" .golangci.yml +git add .golangci.yml + find . -maxdepth 3 -name Dockerfile -print0 | while IFS= read -r -d '' line; do ${SED} -E -e "s#(FROM golang):[0-9]+\.[0-9]+\.[0-9]+#\1:${GO_RELEASE_VERSION}#g" "$line" diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index ef4ecda8836..0b23db09a80 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -33,7 +33,7 @@ List here all the items you have verified BEFORE sending this PR. Please DO NOT - [ ] I have made corresponding changes to the documentation - [ ] I have made corresponding change to the default configuration files - [ ] I have added tests that prove my fix is effective or that my feature works -- [ ] I have added an entry in `CHANGELOG.next.asciidoc` or `CHANGELOG-developer.next.asciidoc`. +- [ ] I have added an entry in `./changelog/fragments` using the [changelog tool](https://github.com/elastic/elastic-agent#changelog) ## Author's Checklist diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml deleted file mode 100644 index d0f29a0fd25..00000000000 --- a/.github/workflows/changelog.yml +++ /dev/null @@ -1,17 +0,0 @@ -name: Changelog -on: [pull_request] - -jobs: - fragments: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Fetch Go version from .go-version - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - - uses: actions/setup-go@v3 - with: - go-version: ${{ env.GO_VERSION }} - - name: check pr-has-fragment - run: | - GOBIN=$PWD/bin go install github.com/elastic/elastic-agent-changelog-tool@latest - ./bin/elastic-agent-changelog-tool pr-has-fragment --repo ${{ github.event.repository.name }} ${{github.event.number}} diff --git a/.github/workflows/elastic-agent-project-board.yml b/.github/workflows/elastic-agent-project-board.yml deleted file mode 100644 index e6add0d093c..00000000000 --- a/.github/workflows/elastic-agent-project-board.yml +++ /dev/null @@ -1,42 +0,0 @@ -name: Add to Elastic Agent Data Plane or Control Plane Board -on: - issues: - types: - - labeled -jobs: - add_to_data_plane-project: - runs-on: ubuntu-latest - if: | - github.event.label.name == 'Team:Elastic-Agent-Data-Plane' - steps: - - uses: octokit/graphql-action@v2.x - id: add_to_project - with: - headers: '{"GraphQL-Features": "projects_next_graphql"}' - query: | - mutation add_to_project($projectid:[ID!]!,$contentid:ID!) 
{ - updateIssue(input: {id:$contentid, projectIds:$projectid}) { - clientMutationId - } - } - projectid: "PRO_kwDOAGc3Zs4AzG8z" - contentid: ${{ github.event.issue.node_id }} - GITHUB_TOKEN: ${{ secrets.ELASTIC_AGENT_PROJECT_BOARD_TOKEN }} - add_to_control_plane-project: - runs-on: ubuntu-latest - if: | - github.event.label.name == 'Team:Elastic-Agent-Control-Plane' - steps: - - uses: octokit/graphql-action@v2.x - id: add_to_project - with: - headers: '{"GraphQL-Features": "projects_next_graphql"}' - query: | - mutation add_to_project($projectid:[ID!]!,$contentid:ID!) { - updateIssue(input: {id:$contentid, projectIds:$projectid}) { - clientMutationId - } - } - projectid: "PRO_kwDOAGc3Zs4AzG9E" - contentid: ${{ github.event.issue.node_id }} - GITHUB_TOKEN: ${{ secrets.ELASTIC_AGENT_PROJECT_BOARD_TOKEN }} diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 62d4006737c..73116460c67 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -20,14 +20,9 @@ jobs: steps: - uses: actions/checkout@v3 - # Uses Go version from the repository. - - name: Read .go-version file - id: goversion - run: echo "::set-output name=version::$(cat .go-version)" - - uses: actions/setup-go@v3 with: - go-version: "${{ steps.goversion.outputs.version }}" + go-version-file: .go-version - name: golangci-lint uses: golangci/golangci-lint-action@v3 diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index bf3e5eed775..23a7c6a4da8 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -12,11 +12,9 @@ jobs: runs-on: macos-latest steps: - uses: actions/checkout@v3 - - name: Fetch Go version from .go-version - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v3 with: - go-version: ${{ env.GO_VERSION }} + go-version-file: .go-version - name: Install dependencies run: go install github.com/magefile/mage - name: Run build diff --git a/.gitignore b/.gitignore index 57546893fb4..476cfd50764 100644 --- a/.gitignore +++ b/.gitignore @@ -45,7 +45,6 @@ fleet.enc.lock # Files generated with the bump version automations *.bck - # agent build/ elastic-agent diff --git a/.go-version b/.go-version index 1a31d398cf5..cafc0b7add4 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.18.8 +1.18.9 diff --git a/.golangci.yml b/.golangci.yml index 96e131c8ade..4071c060ada 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -101,7 +101,7 @@ linters-settings: gosimple: # Select the Go version to target. The default is '1.13'. - go: "1.18.7" + go: "1.18.9" nakedret: # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 @@ -121,17 +121,17 @@ linters-settings: staticcheck: # Select the Go version to target. The default is '1.13'. - go: "1.18.7" + go: "1.18.9" checks: ["all"] stylecheck: # Select the Go version to target. The default is '1.13'. - go: "1.18.7" + go: "1.18.9" checks: ["all"] unused: # Select the Go version to target. The default is '1.13'. 
- go: "1.18.7" + go: "1.18.9" gosec: excludes: diff --git a/.mergify.yml b/.mergify.yml index 528df9b498b..50ba909e2ba 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -233,3 +233,16 @@ pull_request_rules: labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" + - name: backport patches to 8.6 branch + conditions: + - merged + - label=backport-v8.6.0 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "8.6" + labels: + - "backport" + title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" diff --git a/Dockerfile b/Dockerfile index fd56ef5e2ff..50e50dcf32d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.18.7 +ARG GO_VERSION=1.18.9 FROM circleci/golang:${GO_VERSION} diff --git a/NOTICE.txt b/NOTICE.txt index 7bc5103d040..d7ac0b986c4 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,5 +1,5 @@ Elastic Beats -Copyright 2014-2022 Elasticsearch BV +Copyright 2014-2023 Elasticsearch BV This product includes software developed by The Apache Software Foundation (http://www.apache.org/). @@ -816,15 +816,105 @@ Contents of probable licence file $GOMODCACHE/github.com/dolmen-go/contextio@v0. -------------------------------------------------------------------------------- Dependency : github.com/elastic/e2e-testing -Version: v1.99.2-0.20220117192005-d3365c99b9c4 +Version: v1.99.2-0.20221205111528-ade3c840d0c0 Licence type (autodetected): Elastic -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/e2e-testing@v1.99.2-0.20220117192005-d3365c99b9c4/cli/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/e2e-testing@v1.99.2-0.20221205111528-ade3c840d0c0/LICENSE.txt: -Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -or more contributor license agreements. Licensed under the Elastic License; -you may not use this file except in compliance with the Elastic License. +Elastic License 2.0 + +URL: https://www.elastic.co/licensing/elastic-license + +## Acceptance + +By using the software, you agree to all of the terms and conditions below. + +## Copyright License + +The licensor grants you a non-exclusive, royalty-free, worldwide, +non-sublicensable, non-transferable license to use, copy, distribute, make +available, and prepare derivative works of the software, in each case subject to +the limitations and conditions below. + +## Limitations + +You may not provide the software to third parties as a hosted or managed +service, where the service provides users with access to any substantial set of +the features or functionality of the software. + +You may not move, change, disable, or circumvent the license key functionality +in the software, and you may not remove or obscure any functionality in the +software that is protected by the license key. + +You may not alter, remove, or obscure any licensing, copyright, or other notices +of the licensor in the software. Any use of the licensor’s trademarks is subject +to applicable law. + +## Patents + +The licensor grants you a license, under any patent claims the licensor can +license, or becomes able to license, to make, have made, use, sell, offer for +sale, import and have imported the software, in each case subject to the +limitations and conditions in this license. This license does not cover any +patent claims that you cause to be infringed by modifications or additions to +the software. 
If you or your company make any written claim that the software +infringes or contributes to infringement of any patent, your patent license for +the software granted under these terms ends immediately. If your company makes +such a claim, your patent license ends immediately for work on behalf of your +company. + +## Notices + +You must ensure that anyone who gets a copy of any part of the software from you +also gets a copy of these terms. + +If you modify the software, you must include in any modified copies of the +software prominent notices stating that you have modified the software. + +## No Other Rights + +These terms do not imply any licenses other than those expressly granted in +these terms. + +## Termination + +If you use the software in violation of these terms, such use is not licensed, +and your licenses will automatically terminate. If the licensor provides you +with a notice of your violation, and you cease all violation of this license no +later than 30 days after you receive that notice, your licenses will be +reinstated retroactively. However, if you violate these terms after such +reinstatement, any additional violation of these terms will cause your licenses +to terminate automatically and permanently. + +## No Liability + +*As far as the law allows, the software comes as is, without any warranty or +condition, and the licensor will not be liable to you for any damages arising +out of these terms or the use or nature of the software, under any kind of +legal claim.* + +## Definitions + +The **licensor** is the entity offering these terms, and the **software** is the +software the licensor makes available under these terms, including any portion +of it. + +**you** refers to the individual or entity agreeing to these terms. + +**your company** is any legal entity, sole proprietorship, or other kind of +organization that you work for, plus all organizations that have control over, +are under the control of, or are under common control with that +organization. **control** means ownership of substantially all the assets of an +entity, or the power to direct its management and policies by vote, contract, or +otherwise. Control can be direct or indirect. + +**your licenses** are all the licenses granted to you for the software under +these terms. + +**use** means anything you do with the software requiring one of your licenses. + +**trademark** means trademarks, service marks, and similar rights. 
-------------------------------------------------------------------------------- @@ -1040,11 +1130,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-a -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-client/v7 -Version: v7.0.0-20220804181728-b0328d2fe484 +Version: v7.0.2-0.20221129150247-15881a8e64ef Licence type (autodetected): Elastic -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.0.0-20220804181728-b0328d2fe484/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.0.2-0.20221129150247-15881a8e64ef/LICENSE.txt: ELASTIC LICENSE AGREEMENT @@ -1273,11 +1363,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.2.6 +Version: v0.2.15 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.2.6/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.2.15/LICENSE: Apache License Version 2.0, January 2004 @@ -2945,11 +3035,11 @@ freely, subject to the following restrictions: -------------------------------------------------------------------------------- Dependency : github.com/magefile/mage -Version: v1.13.0 +Version: v1.14.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/magefile/mage@v1.13.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/magefile/mage@v1.14.0/LICENSE: Apache License Version 2.0, January 2004 @@ -3992,6 +4082,37 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- +Dependency : github.com/sirupsen/logrus +Version: v1.8.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/sirupsen/logrus@v1.8.1/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/spf13/cobra Version: v1.3.0 @@ -5331,11 +5452,11 @@ THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : golang.org/x/crypto -Version: v0.0.0-20210817164053-32db794688a5 +Version: v0.0.0-20211117183948-ae814b36b871 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.0.0-20210817164053-32db794688a5/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.0.0-20211117183948-ae814b36b871/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -5477,6 +5598,43 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- +Dependency : golang.org/x/time +Version: v0.3.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/time@v0.3.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + -------------------------------------------------------------------------------- Dependency : golang.org/x/tools Version: v0.1.9 @@ -7218,11 +7376,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : github.com/cenkalti/backoff/v4 -Version: v4.1.1 +Version: v4.1.2 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/cenkalti/backoff/v4@v4.1.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/cenkalti/backoff/v4@v4.1.2/LICENSE: The MIT License (MIT) @@ -12324,11 +12482,11 @@ Contents of probable licence file $GOMODCACHE/github.com/moby/spdystream@v0.2.0/ -------------------------------------------------------------------------------- Dependency : github.com/moby/term -Version: v0.0.0-20210610120745-9d4ed1856297 +Version: v0.0.0-20210619224110-3f7ff695adc6 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/moby/term@v0.0.0-20210610120745-9d4ed1856297/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/moby/term@v0.0.0-20210619224110-3f7ff695adc6/LICENSE: Apache License @@ -13834,37 +13992,6 @@ DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/sirupsen/logrus -Version: v1.8.1 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/sirupsen/logrus@v1.8.1/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - -------------------------------------------------------------------------------- Dependency : github.com/spf13/afero Version: v1.6.0 @@ -15340,43 +15467,6 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- -Dependency : golang.org/x/time -Version: v0.0.0-20210723032227-1f47c861a9ac -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/golang.org/x/time@v0.0.0-20210723032227-1f47c861a9ac/LICENSE: - -Copyright (c) 2009 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- Dependency : golang.org/x/xerrors Version: v0.0.0-20200804184101-5ec99f83aff1 @@ -17399,11 +17489,11 @@ Contents of probable licence file $GOMODCACHE/sigs.k8s.io/structured-merge-diff/ -------------------------------------------------------------------------------- Dependency : sigs.k8s.io/yaml -Version: v1.2.0 +Version: v1.3.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/sigs.k8s.io/yaml@v1.2.0/LICENSE: +Contents of probable licence file $GOMODCACHE/sigs.k8s.io/yaml@v1.3.0/LICENSE: The MIT License (MIT) diff --git a/_meta/config/common.p2.yml.tmpl b/_meta/config/common.p2.yml.tmpl index 762b10bfc71..f6d930898a6 100644 --- a/_meta/config/common.p2.yml.tmpl +++ b/_meta/config/common.p2.yml.tmpl @@ -11,7 +11,8 @@ outputs: inputs: - type: system/metrics - + # Each input must have a unique ID. + id: unique-system-metrics-input # Namespace name must conform to the naming conventions for Elasticsearch indices, cannot contain dashes (-), and cannot exceed 100 bytes # For index naming restrictions, see https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html#indices-create-api-path-params data_stream.namespace: default @@ -117,6 +118,9 @@ inputs: # address: localhost # # port for the GRPC server that spawned processes connect back to. # port: 6789 +# # max_message_size limits the message size in agent internal communication +# # default is 100MB +# max_message_size: 104857600 # agent.retry: # # Enabled determines whether retry is possible. Default is false. @@ -179,7 +183,7 @@ agent.logging.to_stderr: true # Configure log file size limit. If limit is reached, log file will be # automatically rotated - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 20971520 # = 20MB # Number of rotated log files to keep. Oldest files will be deleted first. 
#keepfiles: 7 diff --git a/_meta/config/common.reference.p2.yml.tmpl b/_meta/config/common.reference.p2.yml.tmpl index e24b6082f62..d48d590d618 100644 --- a/_meta/config/common.reference.p2.yml.tmpl +++ b/_meta/config/common.reference.p2.yml.tmpl @@ -11,7 +11,8 @@ outputs: inputs: - type: system/metrics - + # Each input must have a unique ID. + id: unique-system-metrics-input # Namespace name must conform to the naming conventions for Elasticsearch indices, cannot contain dashes (-), and cannot exceed 100 bytes # For index naming restrictions, see https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html#indices-create-api-path-params data_stream.namespace: default @@ -84,6 +85,9 @@ inputs: # address: localhost # # port for the GRPC server that spawned processes connect back to. # port: 6789 +# # max_message_size limits the message size in agent internal communication +# # default is 100MB +# max_message_size: 104857600 # agent.retry: # # Enabled determines whether retry is possible. Default is false. @@ -179,7 +183,7 @@ agent.logging.to_stderr: true # Configure log file size limit. If limit is reached, log file will be # automatically rotated - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 20971520 # = 20MB # Number of rotated log files to keep. Oldest files will be deleted first. #keepfiles: 7 diff --git a/_meta/config/elastic-agent.docker.yml.tmpl b/_meta/config/elastic-agent.docker.yml.tmpl index 659aa4d07ef..314bbea3c0e 100644 --- a/_meta/config/elastic-agent.docker.yml.tmpl +++ b/_meta/config/elastic-agent.docker.yml.tmpl @@ -10,7 +10,8 @@ outputs: inputs: - type: system/metrics - + # Each input must have a unique ID. + id: unique-system-metrics-input # Namespace name must conform to the naming conventions for Elasticsearch indices, cannot contain dashes (-), and cannot exceed 100 bytes # For index naming restrictions, see https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html#indices-create-api-path-params data_stream.namespace: default @@ -83,6 +84,9 @@ inputs: # address: localhost # # port for the GRPC server that spawned processes connect back to. # port: 6789 +# # max_message_size limits the message size in agent internal communication +# # default is 100MB +# max_message_size: 104857600 # agent.retry: # # Enabled determines whether retry is possible. Default is false. @@ -178,7 +182,7 @@ agent.logging.to_stderr: true # Configure log file size limit. If limit is reached, log file will be # automatically rotated - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 20971520 # = 20MB # Number of rotated log files to keep. Oldest files will be deleted first. #keepfiles: 7 diff --git a/_meta/elastic-agent.yml b/_meta/elastic-agent.yml index 0939ebcdc67..80839757f9a 100644 --- a/_meta/elastic-agent.yml +++ b/_meta/elastic-agent.yml @@ -10,7 +10,8 @@ outputs: inputs: - type: system/metrics - + # Each input must have a unique ID. + id: unique-system-metrics-input # Namespace name must conform to the naming conventions for Elasticsearch indices, cannot contain dashes (-), and cannot exceed 100 bytes # For index naming restrictions, see https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html#indices-create-api-path-params data_stream.namespace: default @@ -170,7 +171,7 @@ agent.logging.to_stderr: true # Configure log file size limit. 
If limit is reached, log file will be # automatically rotated - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 20971520 # = 20MB # Number of rotated log files to keep. Oldest files will be deleted first. #keepfiles: 7 diff --git a/changelog/8.5.1.yaml b/changelog/8.5.1.yaml new file mode 100644 index 00000000000..9a5e2f352a3 --- /dev/null +++ b/changelog/8.5.1.yaml @@ -0,0 +1,38 @@ +version: 8.5.1 +entries: + - kind: bug-fix + summary: Fix how multiple Fleet Server hosts are handled + description: It fixes the bug when the Elastic Agent would be enrolled using a valid Fleet Server URL, but the policy would contain more than one, being the first URL unreachable. In that case the Elastic Agent would enroll with Fleet Server, but become unhealthy as it'd get stuck trying only the first, unreachable Fleet Server host. + component: "" + pr: + - https://github.com/elastic/elastic-agent/pull/1329 + issue: + - https://github.com/elastic/elastic-agent/issues/1328 + timestamp: 1666281194 + file: + name: 1666281194-Fix-how-multiple-Fleet-Server-hosts-are-handled.yaml + checksum: 6fb9e56c4b750ec352431cef72a4c45251f54c7f + - kind: bug-fix + summary: 'Fix: Windows Agent Left Unhealthy After Removing Endpoint Integration' + description: "" + component: "" + pr: + - https://github.com/elastic/elastic-agent/pull/1286 + issue: + - https://github.com/elastic/elastic-agent/issues/1262 + timestamp: 1666611696 + file: + name: 1666611696-fix_service_stop_timeout.yaml + checksum: 212c7142d5ac4f6f061f7da0149d7600e97df2d5 + - kind: feature + summary: Improve shutdown logs + description: "" + component: cmd, handler, upgrade + pr: + - https://github.com/elastic/elastic-agent/pull/1618 + issue: + - https://github.com/elastic/elastic-agent/issues/1358 + timestamp: 1666789812 + file: + name: 1666789812-Improve-shutdown-logs.yaml + checksum: 0292242b3855f5e8e7701f9149404a71b04729f9 diff --git a/changelog/8.5.2.yaml b/changelog/8.5.2.yaml new file mode 100644 index 00000000000..132c4607cbc --- /dev/null +++ b/changelog/8.5.2.yaml @@ -0,0 +1,2 @@ +version: 8.5.2 +# No changes in this release. diff --git a/changelog/fragments/1665780486-heartbeat-es-output-only.yaml b/changelog/fragments/1665780486-heartbeat-es-output-only.yaml index 1e3b4059ddf..c8a9a0e6962 100644 --- a/changelog/fragments/1665780486-heartbeat-es-output-only.yaml +++ b/changelog/fragments/1665780486-heartbeat-es-output-only.yaml @@ -24,7 +24,7 @@ component: synthetics-integration # If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. # NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. # Please provide it if you are adding a fragment for a different PR. -pr: 1491 +pr: https://github.com/elastic/elastic-agent/pull/1491 # Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). # If not present is automatically filled by the tooling with the issue linked to the PR number. diff --git a/changelog/fragments/1666095433-service_runtime_v2.yaml b/changelog/fragments/1666095433-service_runtime_v2.yaml index f54aa7e5d9c..25d5067fb8c 100644 --- a/changelog/fragments/1666095433-service_runtime_v2.yaml +++ b/changelog/fragments/1666095433-service_runtime_v2.yaml @@ -1,5 +1,5 @@ kind: feature summary: Service runtime for V2 description: Service runtime for V2, tailored specifically for Endpoint service. 
-pr: 1529 -issue: 1069 +pr: https://github.com/elastic/elastic-agent/pull/1529 +issue: https://github.com/elastic/elastic-agent/issues/1069 diff --git a/changelog/fragments/1666611696-fix_service_stop_timeout.yaml b/changelog/fragments/1666611696-fix_service_stop_timeout.yaml deleted file mode 100644 index 5125282618f..00000000000 --- a/changelog/fragments/1666611696-fix_service_stop_timeout.yaml +++ /dev/null @@ -1,4 +0,0 @@ -kind: bug-fix -summary: "Fix: Windows Agent Left Unhealthy After Removing Endpoint Integration" -pr: 1286 -issue: 1262 diff --git a/changelog/fragments/1667571017-Add-support-for-running-the-elastic-agent-shipper.yaml b/changelog/fragments/1667571017-Add-support-for-running-the-elastic-agent-shipper.yaml index 26430b05741..718aef05879 100644 --- a/changelog/fragments/1667571017-Add-support-for-running-the-elastic-agent-shipper.yaml +++ b/changelog/fragments/1667571017-Add-support-for-running-the-elastic-agent-shipper.yaml @@ -24,8 +24,8 @@ component: # If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. # NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. # Please provide it if you are adding a fragment for a different PR. -pr: 1527 +pr: https://github.com/elastic/elastic-agent/pull/1527 # Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). # If not present is automatically filled by the tooling with the issue linked to the PR number. -issue: 219 +issue: https://github.com/elastic/elastic-agent/issues/219 diff --git a/changelog/fragments/1668483030-common-expression-language.yaml b/changelog/fragments/1668483030-common-expression-language.yaml index 824d7f5a501..0391c624f30 100644 --- a/changelog/fragments/1668483030-common-expression-language.yaml +++ b/changelog/fragments/1668483030-common-expression-language.yaml @@ -24,7 +24,7 @@ component: spec # If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. # NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. # Please provide it if you are adding a fragment for a different PR. -pr: 1719 +pr: https://github.com/elastic/elastic-agent/pull/1719 # Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). # If not present is automatically filled by the tooling with the issue linked to the PR number. diff --git a/changelog/fragments/1666281194-Fix-how-multiple-Fleet-Server-hosts-are-handled.yaml b/changelog/fragments/1668591286-fix-beats-logs.yaml similarity index 77% rename from changelog/fragments/1666281194-Fix-how-multiple-Fleet-Server-hosts-are-handled.yaml rename to changelog/fragments/1668591286-fix-beats-logs.yaml index c0f13aa3d9c..05e28b1d548 100644 --- a/changelog/fragments/1666281194-Fix-how-multiple-Fleet-Server-hosts-are-handled.yaml +++ b/changelog/fragments/1668591286-fix-beats-logs.yaml @@ -11,24 +11,25 @@ kind: bug-fix # Change summary; a 80ish characters long description of the change. -summary: Fix how multiple Fleet Server hosts are handled +summary: "Monitoring: fix JSON logs parsing for Beats" # Long description; in case the summary is not enough to describe the change # this field accommodate a description without length limits. 
-description: It fixes the bug when the Elastic Agent would be enrolled using - a valid Fleet Server URL, but the policy would contain more than one, being - the first URL unreachable. In that case the Elastic Agent would enroll with - Fleet Server, but become unhealthy as it'd get stuck trying only the first, - unreachable Fleet Server host. +description: >- + Fixes the JSON parsing from Beats logs. The configuration sent to + Filebeat did not contain the input type, hence a log input instead of a + filestream one was being started. + + This commit also improves the configuraiton of the ndjson parser. # Affected component; a word indicating the component this changeset affects. -#component: +component: monitoring # PR number; optional; the PR number that added the changeset. # If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. # NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. # Please provide it if you are adding a fragment for a different PR. -pr: 1329 +pr: https://github.com/elastic/elastic-agent/pull/1735 # Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). # If not present is automatically filled by the tooling with the issue linked to the PR number. diff --git a/changelog/fragments/1668645651-apm-fleet-config.yaml b/changelog/fragments/1668645651-apm-fleet-config.yaml new file mode 100644 index 00000000000..7db14c6f0d3 --- /dev/null +++ b/changelog/fragments/1668645651-apm-fleet-config.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: V2 Provide Fleet configuration to APM server + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1745 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+issue: https://github.com/elastic/elastic-agent/issues/1737 diff --git a/changelog/fragments/1666789812-Improve-shutdown-logs.yaml b/changelog/fragments/1668679987-apm-monitoring.yaml similarity index 93% rename from changelog/fragments/1666789812-Improve-shutdown-logs.yaml rename to changelog/fragments/1668679987-apm-monitoring.yaml index 091e2570ae6..fed7afe9177 100644 --- a/changelog/fragments/1666789812-Improve-shutdown-logs.yaml +++ b/changelog/fragments/1668679987-apm-monitoring.yaml @@ -8,24 +8,24 @@ # - security: impacts on the security of a product or a user’s deployment. # - upgrade: important information for someone upgrading from a prior version # - other: does not fit into any of the other categories -kind: feature +kind: bug-fix # Change summary; a 80ish characters long description of the change. -summary: Improve shutdown logs +summary: apm-monitoring # Long description; in case the summary is not enough to describe the change # this field accommodate a description without length limits. #description: # Affected component; a word indicating the component this changeset affects. -component: cmd, handler, upgrade +component: # PR number; optional; the PR number that added the changeset. # If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. # NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. # Please provide it if you are adding a fragment for a different PR. -pr: 1618 +#pr: 1234 # Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). # If not present is automatically filled by the tooling with the issue linked to the PR number. -issue: 1358 +#issue: 1234 diff --git a/changelog/fragments/1665784342-use-stack-version-npm-synthetics.yaml b/changelog/fragments/1669049916-CHANGELOG.asciidoc.yaml similarity index 89% rename from changelog/fragments/1665784342-use-stack-version-npm-synthetics.yaml rename to changelog/fragments/1669049916-CHANGELOG.asciidoc.yaml index a928c800d1e..44a1cdd87d5 100644 --- a/changelog/fragments/1665784342-use-stack-version-npm-synthetics.yaml +++ b/changelog/fragments/1669049916-CHANGELOG.asciidoc.yaml @@ -11,20 +11,20 @@ kind: feature # Change summary; a 80ish characters long description of the change. -summary: use-stack-version-npm-synthetics +summary: Adds support for a new executable used to prevent container drift. # Long description; in case the summary is not enough to describe the change # this field accommodate a description without length limits. -description: Always npm i the stack_release version of @elastic/synthetics +#description: # Affected component; a word indicating the component this changeset affects. -component: synthetics-integration +component: cloud-defend # PR number; optional; the PR number that added the changeset. # If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. # NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. # Please provide it if you are adding a fragment for a different PR. -pr: 1528 +pr: https://github.com/elastic/elastic-agent/pull/1764 # Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). # If not present is automatically filled by the tooling with the issue linked to the PR number. 
diff --git a/changelog/fragments/1669066497-struct-compare-unit-update.yaml b/changelog/fragments/1669066497-struct-compare-unit-update.yaml new file mode 100644 index 00000000000..1428aab2629 --- /dev/null +++ b/changelog/fragments/1669066497-struct-compare-unit-update.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: Change config comparison to fix false unit update events + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: runtime + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1766 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: https://github.com/elastic/elastic-agent/issues/1738 diff --git a/changelog/fragments/1669159455-reimplement-processes-route.yaml b/changelog/fragments/1669159455-reimplement-processes-route.yaml new file mode 100644 index 00000000000..aaa00ef715d --- /dev/null +++ b/changelog/fragments/1669159455-reimplement-processes-route.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: Reimplement processes route + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +description: Re-implement processes route to restore compatibility with the current cloud health checks + +# Affected component; a word indicating the component this changeset affects. +component: + +# PR number; optional; the PR number that added the changeset. 
+# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1773 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: https://github.com/elastic/elastic-agent/issues/1731 diff --git a/changelog/fragments/1669236059-Capture-stdout-stderr-of-all-spawned-components-to-simplify-logging.yaml b/changelog/fragments/1669236059-Capture-stdout-stderr-of-all-spawned-components-to-simplify-logging.yaml new file mode 100644 index 00000000000..a20cfea270b --- /dev/null +++ b/changelog/fragments/1669236059-Capture-stdout-stderr-of-all-spawned-components-to-simplify-logging.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: Capture stdout/stderr of all spawned components and adjust default log level to info for all components + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1702 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+issue: https://github.com/elastic/elastic-agent/issues/221 diff --git a/changelog/fragments/1669665017-fix-nil-action-ack-crash-after-local-upgrade.yaml b/changelog/fragments/1669665017-fix-nil-action-ack-crash-after-local-upgrade.yaml new file mode 100644 index 00000000000..083347ef366 --- /dev/null +++ b/changelog/fragments/1669665017-fix-nil-action-ack-crash-after-local-upgrade.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: Fix Elastic-Agent fails to re-start during upgrade + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1805 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: https://github.com/elastic/elastic-agent/issues/1788 diff --git a/changelog/fragments/1669668741-fix-erroneous-http-url-directory-created-for-monitoring.yaml b/changelog/fragments/1669668741-fix-erroneous-http-url-directory-created-for-monitoring.yaml new file mode 100644 index 00000000000..3b3b899ebc9 --- /dev/null +++ b/changelog/fragments/1669668741-fix-erroneous-http-url-directory-created-for-monitoring.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: Fix erroneous http url directory created for monitoring + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. 
+component: + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1811 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: https://github.com/elastic/elastic-agent/issues/1806 diff --git a/changelog/fragments/1669739947-fix-osqueryd-and-osquery-extension.ext-permissions.yaml b/changelog/fragments/1669739947-fix-osqueryd-and-osquery-extension.ext-permissions.yaml new file mode 100644 index 00000000000..09dc99a0d4e --- /dev/null +++ b/changelog/fragments/1669739947-fix-osqueryd-and-osquery-extension.ext-permissions.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: Fix osqueryd and osquery-extension.ext permissions + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1829 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +#issue: 1234 diff --git a/changelog/fragments/1669845868-add-agent-logging-level-to-fleet-configuration.yaml b/changelog/fragments/1669845868-add-agent-logging-level-to-fleet-configuration.yaml new file mode 100644 index 00000000000..cd3bb7e6383 --- /dev/null +++ b/changelog/fragments/1669845868-add-agent-logging-level-to-fleet-configuration.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. 
+# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: Add agent logging level to fleet configuration + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1856 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: https://github.com/elastic/elastic-agent/issues/1853 diff --git a/changelog/fragments/1669928791-Ensure-unique-input-ID-when-variable-substition-occurs-from-a-dynamic-provider.yaml b/changelog/fragments/1669928791-Ensure-unique-input-ID-when-variable-substition-occurs-from-a-dynamic-provider.yaml new file mode 100644 index 00000000000..5af9a9c9b1a --- /dev/null +++ b/changelog/fragments/1669928791-Ensure-unique-input-ID-when-variable-substition-occurs-from-a-dynamic-provider.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: Ensure unique input ID when variable substition occurs from a dynamic provider + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1866 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+issue: https://github.com/elastic/elastic-agent/issues/1751 diff --git a/changelog/fragments/1669929902-fix-local-fleet-server-port.yaml b/changelog/fragments/1669929902-fix-local-fleet-server-port.yaml new file mode 100644 index 00000000000..250d4373ba0 --- /dev/null +++ b/changelog/fragments/1669929902-fix-local-fleet-server-port.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: Elastic-agent will use local port when running fleet-server + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +description: The elastic-agent will now use the 8221 locally bound port when running fleet-server instead of the external port (8220). + +# Affected component; a word indicating the component this changeset affects. +component: + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1867 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: https://github.com/elastic/ingest-dev/issues/1394 diff --git a/changelog/fragments/1669940990-Remove-inputs-when-all-streams-are-removed.yaml b/changelog/fragments/1669940990-Remove-inputs-when-all-streams-are-removed.yaml new file mode 100644 index 00000000000..240cb753106 --- /dev/null +++ b/changelog/fragments/1669940990-Remove-inputs-when-all-streams-are-removed.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: Remove inputs when all streams are removed + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. 
+component: + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1869 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: https://github.com/elastic/elastic-agent/issues/1868 diff --git a/changelog/fragments/1670518841-No-longer-restart-Elastic-Agent-on-log-level-change.yaml b/changelog/fragments/1670518841-No-longer-restart-Elastic-Agent-on-log-level-change.yaml new file mode 100644 index 00000000000..b84d7621212 --- /dev/null +++ b/changelog/fragments/1670518841-No-longer-restart-Elastic-Agent-on-log-level-change.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: No longer restart Elastic Agent on log level change + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1914 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: https://github.com/elastic/elastic-agent/issues/1896 diff --git a/changelog/fragments/1671748292-input-ids-are-required.yaml b/changelog/fragments/1671748292-input-ids-are-required.yaml new file mode 100644 index 00000000000..b14524c4352 --- /dev/null +++ b/changelog/fragments/1671748292-input-ids-are-required.yaml @@ -0,0 +1,34 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. 
+# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: breaking-change + +# Change summary; a 80ish characters long description of the change. +summary: Each input in an agent policy must have a unique ID. + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +description: | + Each input in an agent policy must have a unique ID, like "id: my-unique-input-id". + This only affects standalone agents. Unique IDs are automatically generated in + agent policies managed by Fleet. + +# Affected component; a word indicating the component this changeset affects. +component: agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +#pr: https://github.com/owner/repo/1234 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +#issue: https://github.com/owner/repo/1234 diff --git a/changelog/fragments/1672911992-v2-control-protocol.yaml b/changelog/fragments/1672911992-v2-control-protocol.yaml new file mode 100644 index 00000000000..ddc0e737eda --- /dev/null +++ b/changelog/fragments/1672911992-v2-control-protocol.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: breaking-change + +# Change summary; a 80ish characters long description of the change. +summary: | + New control protocol for the Elastic Agent. Allows among other things to simplify new inputs development. + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# description: + +# Affected component; a word indicating the component this changeset affects. +component: agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1701 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
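The changelog fragments above all share one schema: a kind drawn from the commented list, a short summary, an optional longer description, an affected component, and optional pr/issue links that the changelog tooling can fill in automatically. Below is a minimal sketch of loading and sanity-checking such a fragment in Go, assuming gopkg.in/yaml.v3 and a hypothetical validateFragment helper; it is illustrative only and is not the elastic-agent-changelog-tool's actual implementation.

package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v3"
)

// fragment mirrors the fields used by the changelog fragments in this PR.
// The struct is illustrative; the real tooling may model them differently.
type fragment struct {
	Kind        string `yaml:"kind"`
	Summary     string `yaml:"summary"`
	Description string `yaml:"description"`
	Component   string `yaml:"component"`
	PR          string `yaml:"pr"`
	Issue       string `yaml:"issue"`
}

// validKinds lists the kinds enumerated in the fragment template comments.
var validKinds = map[string]bool{
	"breaking-change": true, "deprecation": true, "bug-fix": true,
	"enhancement": true, "feature": true, "known-issue": true,
	"security": true, "upgrade": true, "other": true,
}

// validateFragment parses one fragment file and applies two basic checks:
// the kind must be one of the documented values and the summary must be set.
func validateFragment(path string) error {
	raw, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	var f fragment
	if err := yaml.Unmarshal(raw, &f); err != nil {
		return fmt.Errorf("parsing %s: %w", path, err)
	}
	if !validKinds[f.Kind] {
		return fmt.Errorf("%s: unknown kind %q", path, f.Kind)
	}
	if f.Summary == "" {
		return fmt.Errorf("%s: summary is required", path)
	}
	return nil
}

func main() {
	// Any fragment added in this PR should pass; a misspelled kind or an
	// empty summary would be rejected.
	if err := validateFragment("changelog/fragments/1671748292-input-ids-are-required.yaml"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}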
+issue: https://github.com/elastic/elastic-agent/issues/836 \ No newline at end of file diff --git a/changelog/fragments/1672911993-v2-directory.yaml b/changelog/fragments/1672911993-v2-directory.yaml new file mode 100644 index 00000000000..992c10bbcaa --- /dev/null +++ b/changelog/fragments/1672911993-v2-directory.yaml @@ -0,0 +1,33 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: upgrade + +# Change summary; a 80ish characters long description of the change. +summary: | + Internal directory structure change: addition of the components directory + (and removal of the downloads directory) to contain binaries and their associated artifacts. + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# description: + +# Affected component; a word indicating the component this changeset affects. +component: agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1701 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: https://github.com/elastic/elastic-agent/issues/836 diff --git a/changelog/fragments/1672911994-v2-packaging.yaml b/changelog/fragments/1672911994-v2-packaging.yaml new file mode 100644 index 00000000000..729107e3d6d --- /dev/null +++ b/changelog/fragments/1672911994-v2-packaging.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: upgrade + +# Change summary; a 80ish characters long description of the change. +summary: | + All binaries for every supported integration will be bundled in the Elastic Agent by default. + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# description: + +# Affected component; a word indicating the component this changeset affects. +component: agent + +# PR URL; optional; the PR number that added the changeset. 
+# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent-shipper/pull/126 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: https://github.com/elastic/elastic-agent/issues/836 diff --git a/changelog/fragments/1672911995-v2-diagnostic.yaml b/changelog/fragments/1672911995-v2-diagnostic.yaml new file mode 100644 index 00000000000..d411ab81806 --- /dev/null +++ b/changelog/fragments/1672911995-v2-diagnostic.yaml @@ -0,0 +1,34 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: breaking-change + +# Change summary; a 80ish characters long description of the change. +summary: | + Diagnostic: --pprof argument has been removed, it's now always provided. + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +description: | + The diagnostic command is gathering diagnostic information about the Elastic Agent and each + component/unit ran by the Elastic Agent. --pprof argument has been removed: it's now always provided. + +# Affected component; a word indicating the component this changeset affects. +component: agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1140 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: https://github.com/elastic/elastic-agent/issues/836 diff --git a/changelog/fragments/1672911997-v2-inspect.yaml b/changelog/fragments/1672911997-v2-inspect.yaml new file mode 100644 index 00000000000..6ecaad07077 --- /dev/null +++ b/changelog/fragments/1672911997-v2-inspect.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. 
+# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: | + Add inspect components command to inspect the computed components/units model of the current configuration (elastic-agent inspect components). + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# description: + +# Affected component; a word indicating the component this changeset affects. +component: agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1701 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: https://github.com/elastic/elastic-agent/issues/836 diff --git a/changelog/fragments/1672911999-v2-health-status.yaml b/changelog/fragments/1672911999-v2-health-status.yaml new file mode 100644 index 00000000000..ab1d29d383d --- /dev/null +++ b/changelog/fragments/1672911999-v2-health-status.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: enhancement + +# Change summary; a 80ish characters long description of the change. +summary: | + Health Status: Elastic Agent now indicates detailed status information for each component/unit. + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# description: + +# Affected component; a word indicating the component this changeset affects. +component: agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/fleet-server/pull/1747 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+issue: https://github.com/elastic/elastic-agent/issues/100 diff --git a/changelog/fragments/1672912001-fix-duplicate-entries-on-upgrade.yaml b/changelog/fragments/1672912001-fix-duplicate-entries-on-upgrade.yaml new file mode 100644 index 00000000000..63f3e6ca68d --- /dev/null +++ b/changelog/fragments/1672912001-fix-duplicate-entries-on-upgrade.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: | + Fix duplicate entries on Elastic Agent upgrades issue. + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# description: + +# Affected component; a word indicating the component this changeset affects. +component: agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/1701 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: https://github.com/elastic/elastic-agent/issues/836 diff --git a/control_v1.proto b/control_v1.proto new file mode 100644 index 00000000000..396d8c2fb52 --- /dev/null +++ b/control_v1.proto @@ -0,0 +1,123 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +syntax = "proto3"; + +// proto namespace/package name is shared with elastic-agent-client +// we need to be careful with modifications to avoid name collisions +// proto is here to maintain backward compatibility and cannot be changed. +// elastic-agent-client namespace is likely change after 8.6 +package proto; + +option cc_enable_arenas = true; +option go_package = "pkg/agent/control/proto;proto"; + +// Status codes for the current state. +enum Status { + V1_STARTING = 0; + V1_CONFIGURING = 1; + V1_HEALTHY = 2; + V1_DEGRADED = 3; + V1_FAILED = 4; + V1_STOPPING = 5; + V1_UPGRADING = 6; + V1_SROLLBACK = 7; +} + +// Action status codes for restart and upgrade response. +enum ActionStatus { + // Action was successful. + V1_SUCCESS = 0; + // Action failed. + V1_FAILURE = 1; +} + +// Empty message. +message Empty { +} + +// Version response message. +message VersionResponse { + // Current running version. + string version = 1; + // Current running commit. + string commit = 2; + // Current running build time. 
+ string buildTime = 3; + // Current running version is a snapshot. + bool snapshot = 4; +} + +message RestartResponse { + // Response status. + ActionStatus status = 1; + // Error message when it fails to trigger restart. + string error = 2; +} + +// Upgrade request message. +message UpgradeRequest { + // (Optional) Version to upgrade to. + // + // If not provided Elastic Agent will auto discover the latest version in the same major + // to upgrade to. If wanting to upgrade to a new major that major must be present in the + // this version field. + string version = 1; + + // (Optional) Use a different source URI then configured. + // + // If provided the upgrade process will use the provided sourceURI instead of the configured + // sourceURI in the configuration. + string sourceURI = 2; +} + +// A upgrade response message. +message UpgradeResponse { + // Response status. + ActionStatus status = 1; + + // Version that is being upgraded to. + string version = 2; + + // Error message when it fails to trigger upgrade. + string error = 3; +} + +// Current status of the application in Elastic Agent. +message ApplicationStatus { + // Unique application ID. + string id = 1; + // Application name. + string name = 2; + // Current status. + Status status = 3; + // Current status message. + string message = 4; + // Current status payload. + string payload = 5; +} + +// Status is the current status of Elastic Agent. +message StatusResponse { + // Overall status of Elastic Agent. + Status status = 1; + // Overall status message of Elastic Agent. + string message = 2; + // Status of each application in Elastic Agent. + repeated ApplicationStatus applications = 3; +} + +service ElasticAgentControl { + // Fetches the currently running version of the Elastic Agent. + rpc Version(Empty) returns (VersionResponse); + + // Fetches the currently status of the Elastic Agent. + rpc Status(Empty) returns (StatusResponse); + + // Restart restarts the current running Elastic Agent. + rpc Restart(Empty) returns (RestartResponse); + + // Upgrade starts the upgrade process of Elastic Agent. + rpc Upgrade(UpgradeRequest) returns (UpgradeResponse); +} diff --git a/control.proto b/control_v2.proto similarity index 96% rename from control.proto rename to control_v2.proto index 25eef00de4c..e955fd845b5 100644 --- a/control.proto +++ b/control_v2.proto @@ -7,7 +7,7 @@ syntax = "proto3"; package cproto; option cc_enable_arenas = true; -option go_package = "internal/pkg/agent/control/cproto"; +option go_package = "internal/pkg/agent/control/v2/cproto"; import "google/protobuf/timestamp.proto"; // State codes for the current state. @@ -159,9 +159,9 @@ message StateResponse { StateAgentInfo info = 1; // Overall state of Elastic Agent. State state = 2; - // Overall status message of Elastic Agent. + // Overall state message of Elastic Agent. string message = 3; - // Status of each component in Elastic Agent. + // State of each component in Elastic Agent. repeated ComponentState components = 4; } @@ -244,5 +244,5 @@ service ElasticAgentControl { rpc DiagnosticAgent(DiagnosticAgentRequest) returns (DiagnosticAgentResponse); // Gather diagnostic information for the running units. 
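control_v1.proto above is kept only for backward compatibility and declares the v1 ElasticAgentControl service with four RPCs: Version, Status, Restart and Upgrade. The following is a minimal client sketch for the Version call, assuming stubs generated by the standard protoc-gen-go / protoc-gen-go-grpc plugins; the client constructor, message and field names follow that convention, and the import path and address are placeholders rather than values taken from the repository.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed import path for stubs generated from control_v1.proto above;
	// the actual package location inside the repository may differ.
	proto "github.com/elastic/elastic-agent/pkg/agent/control/proto"
)

func main() {
	// Placeholder address: the real control API listens on a local
	// socket/port determined by the agent's configuration.
	conn, err := grpc.Dial("localhost:6789",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial control API: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Version is one of the four RPCs declared by ElasticAgentControl.
	client := proto.NewElasticAgentControlClient(conn)
	resp, err := client.Version(ctx, &proto.Empty{})
	if err != nil {
		log.Fatalf("version RPC: %v", err)
	}
	fmt.Printf("agent %s (commit %s, snapshot=%v)\n", resp.Version, resp.Commit, resp.Snapshot)
}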
- rpc DiagnosticUnits(DiagnosticUnitsRequest) returns (DiagnosticUnitsResponse); + rpc DiagnosticUnits(DiagnosticUnitsRequest) returns (stream DiagnosticUnitResponse); } diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index 373282a4c1b..e3034033a9f 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -28,7 +28,7 @@ data: #Uncomment to enable hints' support #hints.enabled: true inputs: - - name: kubernetes-cluster-metrics + - id: kubernetes-cluster-metrics condition: ${kubernetes_leaderelection.leader} == true type: kubernetes/metrics use_output: default @@ -267,7 +267,7 @@ data: # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token # ssl.certificate_authorities: # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - - name: system-logs + - id: system-logs type: logfile use_output: default meta: @@ -309,8 +309,7 @@ data: target: '' fields: ecs.version: 1.12.0 - - name: container-log - id: container-log-${kubernetes.pod.name}-${kubernetes.container.id} + - id: container-log-${kubernetes.pod.name}-${kubernetes.container.id} type: filestream use_output: default meta: @@ -335,8 +334,7 @@ data: # match: after paths: - /var/log/containers/*${kubernetes.container.id}.log - - name: audit-log - id: audit-log + - id: audit-log type: filestream use_output: default meta: @@ -381,7 +379,7 @@ data: throw "expected kubernetes.audit.annotations.authorization_k8s_io/decision === allow"; } } - - name: system-metrics + - id: system-metrics type: system/metrics use_output: default meta: @@ -479,7 +477,7 @@ data: metricsets: - socket_summary system.hostfs: /hostfs - - name: kubernetes-node-metrics + - id: kubernetes-node-metrics type: kubernetes/metrics use_output: default meta: @@ -603,7 +601,7 @@ data: # Add extra input blocks here, based on conditions # so as to automatically identify targeted Pods and start monitoring them # using a predefined integration. 
For instance: - #- name: redis + #- id: redis-metrics # type: redis/metrics # use_output: default # meta: diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml index 1a52302826d..1bc76e26506 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml @@ -28,7 +28,7 @@ data: #Uncomment to enable hints' support #hints.enabled: true inputs: - - name: kubernetes-cluster-metrics + - id: kubernetes-cluster-metrics condition: ${kubernetes_leaderelection.leader} == true type: kubernetes/metrics use_output: default @@ -267,7 +267,7 @@ data: # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token # ssl.certificate_authorities: # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - - name: system-logs + - id: system-logs type: logfile use_output: default meta: @@ -309,8 +309,7 @@ data: target: '' fields: ecs.version: 1.12.0 - - name: container-log - id: container-log-${kubernetes.pod.name}-${kubernetes.container.id} + - id: container-log-${kubernetes.pod.name}-${kubernetes.container.id} type: filestream use_output: default meta: @@ -335,8 +334,7 @@ data: # match: after paths: - /var/log/containers/*${kubernetes.container.id}.log - - name: audit-log - id: audit-log + - id: audit-log type: filestream use_output: default meta: @@ -381,7 +379,7 @@ data: throw "expected kubernetes.audit.annotations.authorization_k8s_io/decision === allow"; } } - - name: system-metrics + - id: system-metrics type: system/metrics use_output: default meta: @@ -479,7 +477,7 @@ data: metricsets: - socket_summary system.hostfs: /hostfs - - name: kubernetes-node-metrics + - id: kubernetes-node-metrics type: kubernetes/metrics use_output: default meta: @@ -603,7 +601,7 @@ data: # Add extra input blocks here, based on conditions # so as to automatically identify targeted Pods and start monitoring them # using a predefined integration. For instance: - #- name: redis + #- id: redis-metrics # type: redis/metrics # use_output: default # meta: diff --git a/dev-tools/mage/platforms.go b/dev-tools/mage/platforms.go index 5450f5a4094..aa930989294 100644 --- a/dev-tools/mage/platforms.go +++ b/dev-tools/mage/platforms.go @@ -200,6 +200,14 @@ func (list BuildPlatformList) Get(name string) (BuildPlatform, bool) { return BuildPlatform{}, false } +func (list BuildPlatformList) Names() []string { + platforms := make([]string, len(list)) + for i, bp := range list { + platforms[i] = bp.Name + } + return platforms +} + // Defaults returns the default platforms contained in the list. 
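The new Names() helper on BuildPlatformList simply collects the Name field of every entry, which is convenient for logs and error messages. A self-contained usage sketch follows, with the types re-declared for illustration; it is not the dev-tools/mage package itself.

package main

import (
	"fmt"
	"strings"
)

// Minimal stand-ins mirroring the shapes used in dev-tools/mage/platforms.go.
type BuildPlatform struct{ Name string }
type BuildPlatformList []BuildPlatform

// Names collects the name of each platform in the list, as in the diff above.
func (list BuildPlatformList) Names() []string {
	platforms := make([]string, len(list))
	for i, bp := range list {
		platforms[i] = bp.Name
	}
	return platforms
}

func main() {
	list := BuildPlatformList{{Name: "linux/amd64"}, {Name: "linux/arm64"}, {Name: "windows/amd64"}}
	// One obvious use: a readable, comma-separated listing of supported platforms.
	fmt.Printf("supported platforms: %s\n", strings.Join(list.Names(), ", "))
}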
func (list BuildPlatformList) Defaults() BuildPlatformList { return list.filter(func(p BuildPlatform) bool { diff --git a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl index ec3cd1e4673..7ca7aeba198 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl @@ -18,6 +18,7 @@ RUN mkdir -p {{ $beatHome }}/data {{ $beatHome }}/data/elastic-agent-{{ commit_s ln -s {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/elastic-agent {{ $beatBinary }} && \ chmod 0755 {{ $beatHome }}/data/elastic-agent-*/elastic-agent && \ chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/*beat && \ + (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/osquery* || true) && \ (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/apm-server || true) && \ (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/endpoint-security || true) && \ (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/fleet-server || true) && \ diff --git a/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl b/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl index 04c4dfde930..1e9d5bf74e9 100644 --- a/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl +++ b/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl @@ -4,7 +4,7 @@ ################################################################################ ARG BASE_REGISTRY=registry1.dsop.io ARG BASE_IMAGE=ironbank/redhat/ubi/ubi8 -ARG BASE_TAG=8.6 +ARG BASE_TAG=8.7 FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} as prep_files diff --git a/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl b/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl index e4b4df82e23..8b0e4e0a5fe 100644 --- a/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl +++ b/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl @@ -14,7 +14,7 @@ tags: # Build args passed to Dockerfile ARGs args: BASE_IMAGE: "redhat/ubi/ubi8" - BASE_TAG: "8.6" + BASE_TAG: "8.7" ELASTIC_STACK: "{{ beat_version }}" ELASTIC_PRODUCT: "elastic-agent" diff --git a/elastic-agent.docker.yml b/elastic-agent.docker.yml index 41826bcf12b..42a20740545 100644 --- a/elastic-agent.docker.yml +++ b/elastic-agent.docker.yml @@ -10,7 +10,8 @@ outputs: inputs: - type: system/metrics - + # Each input must have a unique ID. + id: unique-system-metrics-input # Namespace name must conform to the naming conventions for Elasticsearch indices, cannot contain dashes (-), and cannot exceed 100 bytes # For index naming restrictions, see https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html#indices-create-api-path-params data_stream.namespace: default @@ -83,6 +84,9 @@ inputs: # address: localhost # # port for the GRPC server that spawned processes connect back to. # port: 6789 +# # max_message_size limits the message size in agent internal communication +# # default is 100MB +# max_message_size: 104857600 # agent.retry: # # Enabled determines whether retry is possible. Default is false. @@ -178,7 +182,7 @@ agent.logging.to_stderr: true # Configure log file size limit. If limit is reached, log file will be # automatically rotated - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 20971520 # = 20MB # Number of rotated log files to keep. Oldest files will be deleted first. 
#keepfiles: 7 diff --git a/elastic-agent.reference.yml b/elastic-agent.reference.yml index c23e98e6246..55482b5bf81 100644 --- a/elastic-agent.reference.yml +++ b/elastic-agent.reference.yml @@ -17,7 +17,8 @@ outputs: inputs: - type: system/metrics - + # Each input must have a unique ID. + id: unique-system-metrics-input # Namespace name must conform to the naming conventions for Elasticsearch indices, cannot contain dashes (-), and cannot exceed 100 bytes # For index naming restrictions, see https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html#indices-create-api-path-params data_stream.namespace: default @@ -90,6 +91,9 @@ inputs: # address: localhost # # port for the GRPC server that spawned processes connect back to. # port: 6789 +# # max_message_size limits the message size in agent internal communication +# # default is 100MB +# max_message_size: 104857600 # agent.retry: # # Enabled determines whether retry is possible. Default is false. @@ -185,7 +189,7 @@ agent.logging.to_stderr: true # Configure log file size limit. If limit is reached, log file will be # automatically rotated - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 20971520 # = 20MB # Number of rotated log files to keep. Oldest files will be deleted first. #keepfiles: 7 diff --git a/elastic-agent.yml b/elastic-agent.yml index 2e0fbf2b5f5..5da59d4fd81 100644 --- a/elastic-agent.yml +++ b/elastic-agent.yml @@ -17,7 +17,8 @@ outputs: inputs: - type: system/metrics - + # Each input must have a unique ID. + id: unique-system-metrics-input # Namespace name must conform to the naming conventions for Elasticsearch indices, cannot contain dashes (-), and cannot exceed 100 bytes # For index naming restrictions, see https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html#indices-create-api-path-params data_stream.namespace: default @@ -123,6 +124,9 @@ inputs: # address: localhost # # port for the GRPC server that spawned processes connect back to. # port: 6789 +# # max_message_size limits the message size in agent internal communication +# # default is 100MB +# max_message_size: 104857600 # agent.retry: # # Enabled determines whether retry is possible. Default is false. @@ -185,7 +189,7 @@ agent.logging.to_stderr: true # Configure log file size limit. If limit is reached, log file will be # automatically rotated - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 20971520 # = 20MB # Number of rotated log files to keep. Oldest files will be deleted first. 
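Together with the breaking-change fragment earlier in this PR, the id fields added to the sample configurations above mean every input in a standalone agent policy must now carry a unique, non-empty id. Below is a rough sketch of the kind of check this implies, assuming gopkg.in/yaml.v3; it is not the agent's actual validation code.

package main

import (
	"fmt"
	"log"
	"os"

	"gopkg.in/yaml.v3"
)

// policy captures only the part of an agent policy needed for this check.
type policy struct {
	Inputs []struct {
		ID   string `yaml:"id"`
		Type string `yaml:"type"`
	} `yaml:"inputs"`
}

// checkUniqueInputIDs returns an error if any input is missing an id or reuses one.
func checkUniqueInputIDs(raw []byte) error {
	var p policy
	if err := yaml.Unmarshal(raw, &p); err != nil {
		return err
	}
	seen := map[string]bool{}
	for i, in := range p.Inputs {
		if in.ID == "" {
			return fmt.Errorf("input %d (type %s) has no id", i, in.Type)
		}
		if seen[in.ID] {
			return fmt.Errorf("duplicate input id %q", in.ID)
		}
		seen[in.ID] = true
	}
	return nil
}

func main() {
	raw, err := os.ReadFile("elastic-agent.yml")
	if err != nil {
		log.Fatal(err)
	}
	if err := checkUniqueInputIDs(raw); err != nil {
		log.Fatal(err)
	}
	fmt.Println("all inputs have unique ids")
}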
#keepfiles: 7 diff --git a/go.mod b/go.mod index df1845dff01..56773fa2d41 100644 --- a/go.mod +++ b/go.mod @@ -11,10 +11,10 @@ require ( github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534 github.com/docker/go-units v0.4.0 github.com/dolmen-go/contextio v0.0.0-20200217195037-68fc5150bcd5 - github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 + github.com/elastic/e2e-testing v1.99.2-0.20221205111528-ade3c840d0c0 github.com/elastic/elastic-agent-autodiscover v0.2.1 - github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 - github.com/elastic/elastic-agent-libs v0.2.6 + github.com/elastic/elastic-agent-client/v7 v7.0.2-0.20221129150247-15881a8e64ef + github.com/elastic/elastic-agent-libs v0.2.15 github.com/elastic/elastic-agent-system-metrics v0.4.4 github.com/elastic/go-licenser v0.4.0 github.com/elastic/go-sysinfo v1.8.1 @@ -28,7 +28,7 @@ require ( github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 github.com/josephspurrier/goversioninfo v0.0.0-20190209210621-63e6d1acd3dd github.com/kardianos/service v1.2.1-0.20210728001519-a323c3813bc7 - github.com/magefile/mage v1.13.0 + github.com/magefile/mage v1.14.0 github.com/mitchellh/gox v1.0.1 github.com/mitchellh/hashstructure v0.0.0-20170116052023-ab25296c0f51 github.com/mitchellh/mapstructure v1.5.0 @@ -38,6 +38,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/rs/zerolog v1.27.0 github.com/shirou/gopsutil/v3 v3.21.12 + github.com/sirupsen/logrus v1.8.1 github.com/spf13/cobra v1.3.0 github.com/stretchr/testify v1.7.0 github.com/tsg/go-daemon v0.0.0-20200207173439-e704b93fd89b @@ -45,10 +46,11 @@ require ( go.elastic.co/ecszap v1.0.1 go.elastic.co/go-licence-detector v0.5.0 go.uber.org/zap v1.21.0 - golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 + golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 + golang.org/x/time v0.3.0 golang.org/x/tools v0.1.9 google.golang.org/grpc v1.46.0 google.golang.org/protobuf v1.28.0 @@ -65,7 +67,7 @@ require ( github.com/akavel/rsrc v0.8.0 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e // indirect - github.com/cenkalti/backoff/v4 v4.1.1 // indirect + github.com/cenkalti/backoff/v4 v4.1.2 // indirect github.com/containerd/containerd v1.5.13 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -111,7 +113,6 @@ require ( github.com/prometheus/procfs v0.7.3 // indirect github.com/santhosh-tekuri/jsonschema v1.2.4 // indirect github.com/sergi/go-diff v1.1.0 // indirect - github.com/sirupsen/logrus v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.2.0 // indirect github.com/tklauser/go-sysconf v0.3.9 // indirect @@ -128,7 +129,6 @@ require ( golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect - golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46 // indirect @@ -140,7 +140,7 @@ require ( k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // 
indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect - sigs.k8s.io/yaml v1.2.0 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) require ( diff --git a/go.sum b/go.sum index 73ded2d2cf3..3eede0a9bcd 100644 --- a/go.sum +++ b/go.sum @@ -50,6 +50,7 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AlecAivazis/survey/v2 v2.3.2/go.mod h1:TH2kPCDU3Kqq7pLbnCWwZXDBjnhZtmsCle5EiYDJ2fg= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= @@ -58,16 +59,20 @@ github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -92,7 +97,9 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod 
h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim v0.8.24/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.9.1/go.mod h1:Y/0uV2jUab5kBI7SQgl62at0AVX7uaruzADAVmxm3eM= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -103,10 +110,12 @@ github.com/PaesslerAG/gval v1.0.0/go.mod h1:y/nm5yEyTeX6av0OfKJNp9rBNj2XrGhAf5+v github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8= github.com/PaesslerAG/jsonpath v0.1.1/go.mod h1:lVboNxFGal/VwW6d9JzIy56bUsYAP6tH/x80vjnCseY= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/ProtonMail/go-crypto v0.0.0-20211221144345-a4f6767435ab/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= +github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/ProtonMail/go-mime v0.0.0-20190923161245-9b5a4261663a/go.mod h1:NYt+V3/4rEeDuaev/zw1zCq8uqVEuPHzDPo3OZrlGJ4= -github.com/ProtonMail/gopenpgp/v2 v2.4.0/go.mod h1:RFjoVjfhV8f78tjz/fLrp/OXkugL3QmWsiJq/fsQYA4= +github.com/ProtonMail/gopenpgp/v2 v2.4.2/go.mod h1:0byYFEOo6x4F/1YqhN7Z6m015Cqnxllz3CGb5cjJueY= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= @@ -128,6 +137,7 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYU github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4 v0.0.0-20200820155224-be881fa6b91d h1:OE3kzLBpy7pOJEzE55j9sdgrSilUPzzj++FWvp1cmIs= github.com/antlr/antlr4 v0.0.0-20200820155224-be881fa6b91d/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= +github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -139,7 +149,6 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator 
v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aslakhellesoy/gox v1.0.100/go.mod h1:AJl542QsKKG96COVsv0N74HHzVQgDIQPceVUh1aeU2M= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aymerick/raymond v2.0.2+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= @@ -175,10 +184,9 @@ github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e h1:YYUjy5BRwO5 github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e/go.mod h1:V284PjgVwSk4ETmz84rpu9ehpGg7swlIH8npP9k2bGw= github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e h1:Gbx+iVCXG/1m5WSnidDGuHgN+vbIwl+6fR092ANU+Y8= github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e/go.mod h1:AZIh1CCnMrcVm6afFf96PBvE2MRpWFco91z8ObJtgDY= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= @@ -228,6 +236,7 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.2/go.mod h1:qpbpJ1jmlqsR9f2IyaLPsdkCdnt0rbDVqIDlhuu5tRY= github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= @@ -248,8 +257,8 @@ github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= +github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= github.com/containerd/containerd v1.5.13 
h1:XqvKw9i4P7/mFrC3TSM7yV5cwFZ9avXe6M3YANKnzEE= github.com/containerd/containerd v1.5.13/go.mod h1:3AlCrzKROjIuP3JALsY14n8YtntaUDBu7vek+rPN5Vc= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -279,6 +288,7 @@ github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -327,11 +337,11 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creasty/defaults v1.5.2/go.mod h1:FPZ+Y0WNrbqOVw+c6av63eyHUAl6pMHZwqLPvXUZGfY= -github.com/cucumber/gherkin-go/v11 v11.0.0/go.mod h1:CX33k2XU2qog4e+TFjOValoq6mIUq0DmVccZs238R9w= +github.com/cucumber/gherkin-go/v19 v19.0.3/go.mod h1:jY/NP6jUtRSArQQJ5h1FXOUgk5fZK24qtE7vKi776Vw= github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA= -github.com/cucumber/godog v0.11.0/go.mod h1:GyxCIrsg1sgEgpL2GD/rMr3fIoNHpgkjm9nANw/89XY= -github.com/cucumber/messages-go/v10 v10.0.1/go.mod h1:kA5T38CBlBbYLU12TIrJ4fk4wSkVVOgyh7Enyy8WnSg= -github.com/cucumber/messages-go/v10 v10.0.3/go.mod h1:9jMZ2Y8ZxjLY6TG2+x344nt5rXstVVDYSdS5ySfI1WY= +github.com/cucumber/godog v0.12.4/go.mod h1:u6SD7IXC49dLpPN35kal0oYEjsXZWee4pW6Tm9t5pIc= +github.com/cucumber/messages-go/v16 v16.0.0/go.mod h1:EJcyR5Mm5ZuDsKJnT2N9KRnBK30BGjtYotDKpwQ0v6g= +github.com/cucumber/messages-go/v16 v16.0.1/go.mod h1:EJcyR5Mm5ZuDsKJnT2N9KRnBK30BGjtYotDKpwQ0v6g= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= @@ -339,6 +349,7 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -348,21 +359,23 @@ github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod 
h1:dV8l github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/distribution/distribution/v3 v3.0.0-20210804104954-38ab4c606ee3/go.mod h1:gt38b7cvVKazi5XkHvINNytZXgTEntyhtyM3HQz46Nk= +github.com/distribution/distribution/v3 v3.0.0-20211118083504-a29a3c99a684/go.mod h1:UfCu3YXJJCI+IdnqGgYP82dk2+Joxmv+mUTVBES6wac= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk= github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= -github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.11+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U= github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= @@ -380,18 +393,18 @@ github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj6 github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 h1:uYT+Krd8dsvnhnLK9pe/JHZkYtXEGPfbV4Wt1JPPol0= -github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4/go.mod 
h1:UcNuf4pX/qDVNQr0zybm1NL2YoWik+jKBaINZqQCA40= +github.com/elastic/e2e-testing v1.99.2-0.20221205111528-ade3c840d0c0 h1:dTrVPE6HoIHjnuq688zb7JRdcav17IC4wjY1tpwCUE4= +github.com/elastic/e2e-testing v1.99.2-0.20221205111528-ade3c840d0c0/go.mod h1:hzqVK19fiowHGWldLoKRHl3eXLErKyP89o7L/R3aHsk= github.com/elastic/elastic-agent-autodiscover v0.2.1 h1:Nbeayh3vq2FNm6xaFo34mhUdOu0EVlpj53CqCsbU0E4= github.com/elastic/elastic-agent-autodiscover v0.2.1/go.mod h1:gPnzzfdYNdgznAb+iG9eyyXaQXBbAMHa+Y6Z8hXfcGY= -github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 h1:uJIMfLgCenJvxsVmEjBjYGxt0JddCgw2IxgoNfcIXOk= -github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484/go.mod h1:fkvyUfFwyAG5OnMF0h+FV9sC0Xn9YLITwQpSuwungQs= +github.com/elastic/elastic-agent-client/v7 v7.0.2-0.20221129150247-15881a8e64ef h1:+3AWaimDL826eoU06qOFBtA3xmyuTr9YUMVWvnim4mU= +github.com/elastic/elastic-agent-client/v7 v7.0.2-0.20221129150247-15881a8e64ef/go.mod h1:cHviLpA5fAwMbfBIHBVNl16qp90bO7pKHMAQaG+9raU= github.com/elastic/elastic-agent-libs v0.2.5/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= -github.com/elastic/elastic-agent-libs v0.2.6 h1:DpcUcCVYZ7lNtHLUlyT1u/GtGAh49wpL15DTH7+8O5o= -github.com/elastic/elastic-agent-libs v0.2.6/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= +github.com/elastic/elastic-agent-libs v0.2.15 h1:hdAbrZZ2mCPcQLRCE3E8xw3mHKl8HFMt36w7jan/XGo= +github.com/elastic/elastic-agent-libs v0.2.15/go.mod h1:0J9lzJh+BjttIiVjYDLncKYCEWUUHiiqnuI64y6C6ss= github.com/elastic/elastic-agent-system-metrics v0.4.4 h1:Br3S+TlBhijrLysOvbHscFhgQ00X/trDT5VEnOau0E0= github.com/elastic/elastic-agent-system-metrics v0.4.4/go.mod h1:tF/f9Off38nfzTZHIVQ++FkXrDm9keFhFpJ+3pQ00iI= -github.com/elastic/elastic-package v0.32.1/go.mod h1:l1fEnF52XRBL6a5h6uAemtdViz2bjtjUtgdQcuRhEAY= +github.com/elastic/elastic-package v0.36.0/go.mod h1:TUyhRXtf+kazrUthMF+5FtngcFJZtsgty0o/nnl8UFU= github.com/elastic/go-elasticsearch/v7 v7.16.0/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= github.com/elastic/go-elasticsearch/v8 v8.0.0-20210317102009-a9d74cec0186/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4= github.com/elastic/go-licenser v0.3.1/go.mod h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ= @@ -472,21 +485,25 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= 
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -534,8 +551,7 @@ github.com/godbus/dbus/v5 v5.0.5/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= @@ -547,8 +563,10 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -591,6 +609,8 @@ github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUz github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= +github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -604,6 +624,7 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= @@ -653,6 +674,7 @@ github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -744,10 +766,12 @@ github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= -github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= +github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= @@ -810,7 +834,9 @@ 
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtB github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= @@ -818,11 +844,12 @@ github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc8 github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magefile/mage v1.12.1/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= -github.com/magefile/mage v1.13.0 h1:XtLJl8bcCM7EFoO8FyH8XK3t7G5hQAeK+i4tq+veT9M= -github.com/magefile/mage v1.13.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magefile/mage v1.14.0 h1:6QDX3g6z1YvJ4olPhT1wksUcSa/V0a1B+pJb73fBjyo= +github.com/magefile/mage v1.14.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= @@ -856,10 +883,12 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= 
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4= @@ -872,7 +901,7 @@ github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceT github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.1.1/go.mod h1:EBArHfARyrSWO/+Wyr9zwEkc6XMFB9XyNgFNmRkZZU4= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -893,18 +922,19 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= -github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -936,16 +966,20 @@ 
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6 github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= @@ -1030,6 +1064,7 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -1065,6 +1100,7 @@ github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43 github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -1144,7 +1180,7 @@ github.com/syndtr/gocapability 
v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tebeka/go2xunit v1.4.10/go.mod h1:wmc9jKT7KlU4QLU6DNTaIXNnYNOjKKNlp6mjOS0UrqY= -github.com/testcontainers/testcontainers-go v0.11.0/go.mod h1:HztBCODzuA+YpMXGK8amjO8j50jz2gcT0BOzSKUiYIs= +github.com/testcontainers/testcontainers-go v0.13.0/go.mod h1:z1abufU633Eb/FmSBTzV6ntZAC1eZBYPtaFsn4nPuDk= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= @@ -1278,6 +1314,7 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1297,19 +1334,17 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 h1:/pEO3GD/ABYAjuakUS6xSEmmlyVS4kxBNkeA9tLJiTI= +golang.org/x/crypto 
v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1406,12 +1441,16 @@ golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5o golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -1474,6 +1513,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1565,6 +1605,7 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211102192858-4dd72447c267/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1573,9 +1614,7 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= @@ -1594,13 +1633,16 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -1620,6 +1662,7 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod 
h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1655,12 +1698,14 @@ golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200526224456-8b020aee10d2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1672,8 +1717,8 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9 h1:j9KsMiaP1c3B0OTQGth0/k+miLGTgLsAFUCrF2vLcF8= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1753,6 +1798,7 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto 
v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1760,6 +1806,7 @@ google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1793,6 +1840,7 @@ google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46 h1:G1IeWbjrqEq9ChWxEuRPJu6laA67+XgTFHVSAvepr38= google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -1827,6 +1875,7 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= @@ -1904,7 +1953,7 @@ gotest.tools/gotestsum v1.7.0/go.mod h1:V1m4Jw3eBerhI/A6qCxUE07RnCg7ACkKj9BYcAm0 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= 
-helm.sh/helm/v3 v3.7.2/go.mod h1:UXuiAn0+FfBpqbiMuwWt8/aAKkfJvnWLBJ6f4HcFs0M= +helm.sh/helm/v3 v3.8.0/go.mod h1:0nYPSuvuj8TTJDLRSAfbzGGbazPZsayaDpP8s9FfZT8= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1921,85 +1970,84 @@ howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.22.4/go.mod h1:Rgs+9gIGYC5laXQSZZ9JqT5NevNgoGiOdVWi1BAB3qk= k8s.io/api v0.23.1/go.mod h1:WfXnOnwSqNtG62Y1CdjoMxh7r7u9QXGCkA1u0na2jgo= +k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= k8s.io/api v0.23.4 h1:85gnfXQOWbJa1SiWGpE9EEtHs0UVvDyIsSMpEtl2D4E= k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI= -k8s.io/apiextensions-apiserver v0.22.4/go.mod h1:kH9lxD8dbJ+k0ZizGET55lFgdGjO8t45fgZnCVdZEpw= +k8s.io/apiextensions-apiserver v0.23.1/go.mod h1:0qz4fPaHHsVhRApbtk3MGXNn2Q9M/cVWWhfHdY2SxiM= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.22.4/go.mod h1:yU6oA6Gnax9RrxGzVvPFFJ+mpnW6PBSqp0sx0I0HHW0= k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno= +k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.23.4 h1:fhnuMd/xUL3Cjfl64j5ULKZ1/J9n8NuQEgNL+WXWfdM= k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/apiserver v0.22.4/go.mod h1:38WmcUZiiy41A7Aty8/VorWRa8vDGqoUzDf2XYlku0E= -k8s.io/cli-runtime v0.22.4/go.mod h1:x35r0ERHXr/MrbR1C6MPJxQ3xKG6+hXi9m2xLzlMPZA= +k8s.io/apiserver v0.23.1/go.mod h1:Bqt0gWbeM2NefS8CjWswwd2VNAKN6lUKR85Ft4gippY= k8s.io/cli-runtime v0.23.1/go.mod h1:r9r8H/qfXo9w+69vwUL7LokKlLRKW5D6A8vUKCx+YL0= +k8s.io/cli-runtime v0.23.3/go.mod h1:yA00O5pDqnjkBh8fkuugBbfIfjB1nOpz+aYLotbnOfc= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.22.4/go.mod h1:Yzw4e5e7h1LNHA4uqnMVrpEpUs1hJOiuBsJKIlRCHDA= k8s.io/client-go v0.23.1/go.mod h1:6QSI8fEuqD4zgFK0xbdwfB/PthBsIxCJMa3s17WlcO0= +k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE= k8s.io/client-go v0.23.4 h1:YVWvPeerA2gpUudLelvsolzH7c2sFoXXR5wM/sWqNFU= k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0= -k8s.io/code-generator v0.22.4/go.mod h1:qjYl54pQ/emhkT0UxbufbREYJMWsHNNV/jSVwhYZQGw= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/code-generator v0.23.1/go.mod 
h1:V7yn6VNTCWW8GqodYCESVo95fuiEg713S8B7WacWZDA= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= -k8s.io/component-base v0.22.4/go.mod h1:MrSaQy4a3tFVViff8TZL6JHYSewNCLshZCwHYM58v5A= -k8s.io/component-helpers v0.22.4/go.mod h1:A50qTyczDFbhZDifIfS2zFrHuPk9UNOWPpvNZ+3RSIs= +k8s.io/component-base v0.23.1/go.mod h1:6llmap8QtJIXGDd4uIWJhAq0Op8AtQo6bDW2RrNMTeo= +k8s.io/component-helpers v0.23.1/go.mod h1:ZK24U+2oXnBPcas2KolLigVVN9g5zOzaHLkHiQMFGr0= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/kubectl v0.22.4/go.mod h1:ok2qRT6y2Gy4+y+mniJVyUMKeBHP4OWS9Rdtf/QTM5I= +k8s.io/kubectl v0.23.1/go.mod h1:Ui7dJKdUludF8yWAOSN7JZEkOuYixX5yF6E6NjoukKE= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/metrics v0.22.4/go.mod h1:6F/iwuYb1w2QDCoHkeMFLf4pwHBcYKLm4mPtVHKYrIw= +k8s.io/metrics v0.23.1/go.mod h1:qXvsM1KANrc+ZZeFwj6Phvf0NLiC+d3RwcsLcdGc+xs= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE= k8s.io/utils 
v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -oras.land/oras-go v0.4.0/go.mod h1:VJcU+VE4rkclUbum5C0O7deEZbBYnsnpbGSACwTjOcg= +oras.land/oras-go v1.1.0/go.mod h1:1A7vR/0KknT2UkJVWh+xMi95I/AhK8ZrxrnUSmXN0bQ= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g= sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8= -sigs.k8s.io/kustomize/cmd/config v0.9.13/go.mod h1:7547FLF8W/lTaDf0BDqFTbZxM9zqwEJqCKN9sSR0xSs= -sigs.k8s.io/kustomize/kustomize/v4 v4.2.0/go.mod h1:MOkR6fmhwG7hEDRXBYELTi5GSFcLwfqwzTRHW3kv5go= -sigs.k8s.io/kustomize/kyaml v0.11.0/go.mod h1:GNMwjim4Ypgp/MueD3zXHLRJEjz7RvtPae0AwlvEMFM= +sigs.k8s.io/kustomize/cmd/config v0.10.2/go.mod h1:K2aW7nXJ0AaT+VA/eO0/dzFLxmpFcTzudmAgDwPY1HQ= +sigs.k8s.io/kustomize/kustomize/v4 v4.4.1/go.mod h1:qOKJMMz2mBP+vcS7vK+mNz4HBLjaQSWRY22EF6Tb7Io= sigs.k8s.io/kustomize/kyaml v0.13.0/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go index e899f71e2fb..9ea9d59762a 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go @@ -34,8 +34,8 @@ const ( apiStatusTimeout = 15 * time.Second ) -// PolicyChange is a handler for POLICY_CHANGE action. 
-type PolicyChange struct { +// PolicyChangeHandler is a handler for POLICY_CHANGE action. +type PolicyChangeHandler struct { log *logger.Logger agentInfo *info.AgentInfo config *configuration.Configuration @@ -44,16 +44,16 @@ type PolicyChange struct { setters []actions.ClientSetter } -// NewPolicyChange creates a new PolicyChange handler. -func NewPolicyChange( +// NewPolicyChangeHandler creates a new PolicyChange handler. +func NewPolicyChangeHandler( log *logger.Logger, agentInfo *info.AgentInfo, config *configuration.Configuration, store storage.Store, ch chan coordinator.ConfigChange, setters ...actions.ClientSetter, -) *PolicyChange { - return &PolicyChange{ +) *PolicyChangeHandler { + return &PolicyChangeHandler{ log: log, agentInfo: agentInfo, config: config, @@ -64,7 +64,7 @@ func NewPolicyChange( } // AddSetter adds a setter into a collection of client setters. -func (h *PolicyChange) AddSetter(cs actions.ClientSetter) { +func (h *PolicyChangeHandler) AddSetter(cs actions.ClientSetter) { if h.setters == nil { h.setters = make([]actions.ClientSetter, 0) } @@ -73,7 +73,7 @@ func (h *PolicyChange) AddSetter(cs actions.ClientSetter) { } // Handle handles policy change action. -func (h *PolicyChange) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { +func (h *PolicyChangeHandler) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { h.log.Debugf("handlerPolicyChange: action '%+v' received", a) action, ok := a.(*fleetapi.ActionPolicyChange) if !ok { @@ -91,21 +91,16 @@ func (h *PolicyChange) Handle(ctx context.Context, a fleetapi.Action, acker acke return err } - h.ch <- &policyChange{ - ctx: ctx, - cfg: c, - action: a, - acker: acker, - } + h.ch <- newPolicyChange(ctx, c, a, acker, false) return nil } // Watch returns the channel for configuration change notifications. -func (h *PolicyChange) Watch() <-chan coordinator.ConfigChange { +func (h *PolicyChangeHandler) Watch() <-chan coordinator.ConfigChange { return h.ch } -func (h *PolicyChange) handleFleetServerHosts(ctx context.Context, c *config.Config) (err error) { +func (h *PolicyChangeHandler) handleFleetServerHosts(ctx context.Context, c *config.Config) (err error) { // do not update fleet-server host from policy; no setters provided with local Fleet Server if len(h.setters) == 0 { return nil @@ -226,11 +221,33 @@ func fleetToReader(agentInfo *info.AgentInfo, cfg *configuration.Configuration) } type policyChange struct { - ctx context.Context - cfg *config.Config - action fleetapi.Action - acker acker.Acker - commit bool + ctx context.Context + cfg *config.Config + action fleetapi.Action + acker acker.Acker + commit bool + ackWatcher chan struct{} +} + +func newPolicyChange( + ctx context.Context, + config *config.Config, + action fleetapi.Action, + acker acker.Acker, + commit bool) *policyChange { + var ackWatcher chan struct{} + if commit { + // we don't need it otherwise + ackWatcher = make(chan struct{}) + } + return &policyChange{ + ctx: ctx, + cfg: config, + action: action, + acker: acker, + commit: true, + ackWatcher: ackWatcher, + } } func (l *policyChange) Config() *config.Config { @@ -246,11 +263,30 @@ func (l *policyChange) Ack() error { return err } if l.commit { - return l.acker.Commit(l.ctx) + err := l.acker.Commit(l.ctx) + if l.ackWatcher != nil && err == nil { + close(l.ackWatcher) + } + return err } return nil } +// WaitAck waits for policy change to be acked. +// Policy change ack is awaitable only in case commit flag was set. 
+// Caller is responsible to use any reasonable deadline otherwise +// function call can be endlessly blocking. +func (l *policyChange) WaitAck(ctx context.Context) { + if !l.commit || l.ackWatcher == nil { + return + } + + select { + case <-l.ackWatcher: + case <-ctx.Done(): + } +} + func (l *policyChange) Fail(_ error) { // do nothing } diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go index 34114153875..e1e37ec286e 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go @@ -39,7 +39,7 @@ func TestPolicyChange(t *testing.T) { } cfg := configuration.DefaultConfiguration() - handler := NewPolicyChange(log, agentInfo, cfg, nullStore, ch) + handler := NewPolicyChangeHandler(log, agentInfo, cfg, nullStore, ch) err := handler.Handle(context.Background(), action, ack) require.NoError(t, err) @@ -67,7 +67,7 @@ func TestPolicyAcked(t *testing.T) { } cfg := configuration.DefaultConfiguration() - handler := NewPolicyChange(log, agentInfo, cfg, nullStore, ch) + handler := NewPolicyChangeHandler(log, agentInfo, cfg, nullStore, ch) err := handler.Handle(context.Background(), action, tacker) require.NoError(t, err) diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_settings.go b/internal/pkg/agent/application/actions/handlers/handler_action_settings.go index eed67a50682..1c92978d982 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_settings.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_settings.go @@ -8,19 +8,15 @@ import ( "context" "fmt" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) -type reexecManager interface { - ReExec(cb reexec.ShutdownCallbackFn, argOverrides ...string) -} - // Settings handles settings change coming from fleet and updates log level. 
type Settings struct { log *logger.Logger @@ -53,8 +49,14 @@ func (h *Settings) Handle(ctx context.Context, a fleetapi.Action, acker acker.Ac return fmt.Errorf("invalid log level, expected debug|info|warning|error and received '%s'", action.LogLevel) } + lvl := logp.InfoLevel + err := lvl.Unpack(action.LogLevel) + if err != nil { + return fmt.Errorf("failed to unpack log level: %w", err) + } + if err := h.agentInfo.SetLogLevel(action.LogLevel); err != nil { - return errors.New("failed to update log level", err) + return fmt.Errorf("failed to update log level: %w", err) } if err := acker.Ack(ctx, a); err != nil { @@ -63,8 +65,8 @@ func (h *Settings) Handle(ctx context.Context, a fleetapi.Action, acker acker.Ac h.log.Errorf("failed to commit acker after acknowledging action with id '%s'", action.ActionID) } - h.coord.ReExec(nil) - return nil + h.log.Infof("Settings action done, setting agent log level to %s", lvl.String()) + return h.coord.SetLogLevel(ctx, lvl) } func isSupportedLogLevel(level string) bool { diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go b/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go index 045d52a4fcf..b4c4863b062 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go @@ -7,6 +7,7 @@ package handlers import ( "context" "fmt" + "time" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/config" @@ -15,6 +16,10 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) +const ( + unenrollTimeout = 15 * time.Second +) + type stateStore interface { Add(fleetapi.Action) AckToken() string @@ -60,20 +65,22 @@ func (h *Unenroll) Handle(ctx context.Context, a fleetapi.Action, acker acker.Ac a = nil } - h.ch <- &policyChange{ - ctx: ctx, - cfg: config.New(), - action: a, - acker: acker, - commit: true, - } + unenrollPolicy := newPolicyChange(ctx, config.New(), a, acker, true) + h.ch <- unenrollPolicy if h.stateStore != nil { // backup action for future start to avoid starting fleet gateway loop h.stateStore.Add(a) - h.stateStore.Save() + if err := h.stateStore.Save(); err != nil { + h.log.Warnf("Failed to update state store: %v", err) + } } + unenrollCtx, cancel := context.WithTimeout(ctx, unenrollTimeout) + defer cancel() + + unenrollPolicy.WaitAck(unenrollCtx) + // close fleet gateway loop for _, c := range h.closers { c() diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go index 75435cf8e45..dceb31b0dab 100644 --- a/internal/pkg/agent/application/application.go +++ b/internal/pkg/agent/application/application.go @@ -7,6 +7,8 @@ package application import ( "fmt" + "github.com/elastic/elastic-agent-libs/logp" + "go.elastic.co/apm" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" @@ -28,9 +30,11 @@ import ( // New creates a new Agent and bootstrap the required subsystem. func New( log *logger.Logger, + logLevel logp.Level, agentInfo *info.AgentInfo, reexec coordinator.ReExecManager, tracer *apm.Tracer, + disableMonitoring bool, modifiers ...component.PlatformModifier, ) (*coordinator.Coordinator, error) { platform, err := component.LoadPlatformDetail(modifiers...) 
@@ -64,10 +68,12 @@ func New( return nil, fmt.Errorf("failed to load configuration: %w", err) } + // monitoring is not supported in bootstrap mode https://github.com/elastic/elastic-agent/issues/1761 + isMonitoringSupported := !disableMonitoring && cfg.Settings.V1MonitoringEnabled upgrader := upgrade.NewUpgrader(log, cfg.Settings.DownloadConfig, agentInfo) - monitor := monitoring.New(cfg.Settings.V1MonitoringEnabled, cfg.Settings.DownloadConfig.OS(), cfg.Settings.MonitoringConfig, agentInfo) + monitor := monitoring.New(isMonitoringSupported, cfg.Settings.DownloadConfig.OS(), cfg.Settings.MonitoringConfig, agentInfo) - runtime, err := runtime.NewManager(log, cfg.Settings.GRPC.String(), agentInfo, tracer, monitor) + runtime, err := runtime.NewManager(log, cfg.Settings.GRPC.String(), agentInfo, tracer, monitor, cfg.Settings.GRPC) if err != nil { return nil, fmt.Errorf("failed to initialize runtime manager: %w", err) } @@ -105,7 +111,7 @@ func New( composableManaged = true compModifiers = append(compModifiers, FleetServerComponentModifier(cfg.Fleet.Server), - EndpointComponentModifier(cfg.Fleet)) + InjectFleetConfigComponentModifier(cfg.Fleet, agentInfo)) managed, err = newManagedConfigManager(log, agentInfo, cfg, store, runtime) if err != nil { @@ -120,7 +126,7 @@ func New( return nil, errors.New(err, "failed to initialize composable controller") } - coord := coordinator.New(log, agentInfo, specs, reexec, upgrader, runtime, configMgr, composable, caps, monitor, compModifiers...) + coord := coordinator.New(log, logLevel, agentInfo, specs, reexec, upgrader, runtime, configMgr, composable, caps, monitor, compModifiers...) if managed != nil { // the coordinator requires the config manager as well as in managed-mode the config manager requires the // coordinator, so it must be set here once the coordinator is created diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go index 48a476a5164..2a393f8957f 100644 --- a/internal/pkg/agent/application/coordinator/coordinator.go +++ b/internal/pkg/agent/application/coordinator/coordinator.go @@ -9,6 +9,8 @@ import ( "errors" "fmt" + "github.com/elastic/elastic-agent-libs/logp" + "gopkg.in/yaml.v2" "github.com/elastic/elastic-agent/internal/pkg/diagnostics" @@ -21,7 +23,7 @@ import ( "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" - agentclient "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" + agentclient "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/client" "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/config" @@ -66,7 +68,7 @@ type MonitorManager interface { Reload(rawConfig *config.Config) error // InjectMonitoring injects monitoring configuration into resolved ast tree. - MonitoringConfig(map[string]interface{}, map[string]string) (map[string]interface{}, error) + MonitoringConfig(map[string]interface{}, []component.Component, map[string]string) (map[string]interface{}, error) } // Runner provides interface to run a manager and receive running errors. 
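application.New above now threads through the initial log level and a flag that disables self-monitoring, since monitoring is not supported in bootstrap mode. A hedged caller-side sketch; the surrounding variables (log, agentInfo, reexecMgr, tracer, isBootstrap, ctx) are placeholders rather than names from this change:

// Sketch only. Inside New the effective monitoring switch is
// !disableMonitoring && cfg.Settings.V1MonitoringEnabled.
coord, err := application.New(log, logp.InfoLevel, agentInfo, reexecMgr, tracer, isBootstrap)
if err != nil {
	return fmt.Errorf("failed to create the coordinator: %w", err)
}
return coord.Run(ctx)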
@@ -149,6 +151,7 @@ type State struct { State agentclient.State `yaml:"state"` Message string `yaml:"message"` Components []runtime.ComponentComponentState `yaml:"components"` + LogLevel logp.Level `yaml:"log_level"` } // StateFetcher provides an interface to fetch the current state of the coordinator. @@ -166,6 +169,8 @@ type Coordinator struct { specs component.RuntimeSpecs + logLevelCh chan logp.Level + reexecMgr ReExecManager upgradeMgr UpgradeManager monitorMgr MonitorManager @@ -185,11 +190,12 @@ type Coordinator struct { } // New creates a new coordinator. -func New(logger *logger.Logger, agentInfo *info.AgentInfo, specs component.RuntimeSpecs, reexecMgr ReExecManager, upgradeMgr UpgradeManager, runtimeMgr RuntimeManager, configMgr ConfigManager, varsMgr VarsManager, caps capabilities.Capability, monitorMgr MonitorManager, modifiers ...ComponentsModifier) *Coordinator { +func New(logger *logger.Logger, logLevel logp.Level, agentInfo *info.AgentInfo, specs component.RuntimeSpecs, reexecMgr ReExecManager, upgradeMgr UpgradeManager, runtimeMgr RuntimeManager, configMgr ConfigManager, varsMgr VarsManager, caps capabilities.Capability, monitorMgr MonitorManager, modifiers ...ComponentsModifier) *Coordinator { return &Coordinator{ logger: logger, agentInfo: agentInfo, specs: specs, + logLevelCh: make(chan logp.Level), reexecMgr: reexecMgr, upgradeMgr: upgradeMgr, runtimeMgr: runtimeMgr, @@ -198,7 +204,8 @@ func New(logger *logger.Logger, agentInfo *info.AgentInfo, specs component.Runti caps: caps, modifiers: modifiers, state: coordinatorState{ - state: agentclient.Starting, + state: agentclient.Starting, + logLevel: logLevel, }, monitorMgr: monitorMgr, } @@ -210,6 +217,7 @@ func (c *Coordinator) State(local bool) (s State) { s.State = c.state.state s.Message = c.state.message s.Components = c.runtimeMgr.State() + s.LogLevel = c.state.logLevel if c.state.overrideState != nil { // state has been overridden due to an action that is occurring s.State = c.state.overrideState.state @@ -255,7 +263,7 @@ func (c *Coordinator) ReExec(callback reexec.ShutdownCallbackFn, argOverrides .. // Upgrade runs the upgrade process. func (c *Coordinator) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) error { // early check outside of upgrader before overridding the state - if c.upgradeMgr.Upgradeable() { + if !c.upgradeMgr.Upgradeable() { return ErrNotUpgradable } @@ -301,6 +309,18 @@ func (c *Coordinator) PerformDiagnostics(ctx context.Context, req ...runtime.Com return c.runtimeMgr.PerformDiagnostics(ctx, req...) } +// SetLogLevel changes the entire log level for the running Elastic Agent. +func (c *Coordinator) SetLogLevel(ctx context.Context, lvl logp.Level) error { + select { + case <-ctx.Done(): + return ctx.Err() + case c.logLevelCh <- lvl: + // set global once the level change has been taken by the channel + logger.SetLevel(lvl) + return nil + } +} + // Run runs the coordinator. // // The RuntimeManager, ConfigManager and VarsManager that is passed into NewCoordinator are also ran and lifecycle controlled by the Run. 
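SetLogLevel above is a blocking handshake: the new level is only applied once the coordinator's run loop has received it from logLevelCh, and the caller's context bounds the wait. A stand-alone sketch of the same pattern with illustrative names (not the agent's types):

package main

import (
	"context"
	"fmt"
	"time"
)

// runLoop owns the level; setters hand values over a channel so the change is
// always observed by the loop that re-renders the component model.
type runLoop struct {
	levelCh chan string
}

// SetLevel blocks until the loop accepts the value or ctx expires, mirroring
// Coordinator.SetLogLevel above.
func (r *runLoop) SetLevel(ctx context.Context, lvl string) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case r.levelCh <- lvl:
		return nil
	}
}

func (r *runLoop) run(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case lvl := <-r.levelCh:
			fmt.Println("applying log level:", lvl)
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r := &runLoop{levelCh: make(chan string)}
	go r.run(ctx)

	setCtx, setCancel := context.WithTimeout(ctx, time.Second)
	defer setCancel()
	if err := r.SetLevel(setCtx, "debug"); err != nil {
		fmt.Println("level change not applied:", err)
	}
	time.Sleep(50 * time.Millisecond) // give the loop a moment to log (sketch only)
}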
@@ -309,7 +329,7 @@ func (c *Coordinator) PerformDiagnostics(ctx context.Context, req ...runtime.Com func (c *Coordinator) Run(ctx context.Context) error { // log all changes in the state of the runtime go func() { - state := make(map[string]coordinatorComponentLogState) + state := make(map[string]runtime.ComponentState) sub := c.runtimeMgr.SubscribeAll(ctx) for { @@ -317,14 +337,55 @@ func (c *Coordinator) Run(ctx context.Context) error { case <-ctx.Done(): return case s := <-sub.Ch(): - logState := newCoordinatorComponentLogState(&s) - _, ok := state[s.Component.ID] + oldState, ok := state[s.Component.ID] if !ok { - c.logger.With("component", logState).Info("New component created") + componentLog := coordinatorComponentLog{ + ID: s.Component.ID, + State: s.State.State.String(), + } + logBasedOnState(c.logger, s.State.State, fmt.Sprintf("Spawned new component %s: %s", s.Component.ID, s.State.Message), "component", componentLog) + for ui, us := range s.State.Units { + unitLog := coordinatorUnitLog{ + ID: ui.UnitID, + Type: ui.UnitType.String(), + State: us.State.String(), + } + logBasedOnState(c.logger, us.State, fmt.Sprintf("Spawned new unit %s: %s", ui.UnitID, us.Message), "component", componentLog, "unit", unitLog) + } } else { - c.logger.With("component", logState).Info("Existing component state changed") + componentLog := coordinatorComponentLog{ + ID: s.Component.ID, + State: s.State.State.String(), + } + if oldState.State != s.State.State { + cl := coordinatorComponentLog{ + ID: s.Component.ID, + State: s.State.State.String(), + OldState: oldState.State.String(), + } + logBasedOnState(c.logger, s.State.State, fmt.Sprintf("Component state changed %s (%s->%s): %s", s.Component.ID, oldState.State.String(), s.State.State.String(), s.State.Message), "component", cl) + } + for ui, us := range s.State.Units { + oldUS, ok := oldState.Units[ui] + if !ok { + unitLog := coordinatorUnitLog{ + ID: ui.UnitID, + Type: ui.UnitType.String(), + State: us.State.String(), + } + logBasedOnState(c.logger, us.State, fmt.Sprintf("Spawned new unit %s: %s", ui.UnitID, us.Message), "component", componentLog, "unit", unitLog) + } else if oldUS.State != us.State { + unitLog := coordinatorUnitLog{ + ID: ui.UnitID, + Type: ui.UnitType.String(), + State: us.State.String(), + OldState: oldUS.State.String(), + } + logBasedOnState(c.logger, us.State, fmt.Sprintf("Unit state changed %s (%s->%s): %s", ui.UnitID, oldUS.State.String(), us.State.String(), us.Message), "component", componentLog, "unit", unitLog) + } + } } - state[s.Component.ID] = logState + state[s.Component.ID] = s.State if s.State.State == client.UnitStateStopped { delete(state, s.Component.ID) } @@ -560,6 +621,14 @@ func (c *Coordinator) runner(ctx context.Context) error { c.logger.Errorf("%s", err) } } + case ll := <-c.logLevelCh: + if ctx.Err() == nil { + if err := c.processLogLevel(ctx, ll); err != nil { + c.state.state = agentclient.Failed + c.state.message = err.Error() + c.logger.Errorf("%s", err) + } + } } } } @@ -630,6 +699,21 @@ func (c *Coordinator) processVars(ctx context.Context, vars []*transpiler.Vars) return nil } +func (c *Coordinator) processLogLevel(ctx context.Context, ll logp.Level) (err error) { + span, ctx := apm.StartSpan(ctx, "log_level", "app.internal") + defer func() { + apm.CaptureError(ctx, err).Send() + span.End() + }() + + c.state.logLevel = ll + + if c.state.ast != nil && c.state.vars != nil { + return c.process(ctx) + } + return nil +} + func (c *Coordinator) process(ctx context.Context) (err error) { span, ctx := 
apm.StartSpan(ctx, "process", "app.internal") defer func() { @@ -677,7 +761,7 @@ func (c *Coordinator) compute() (map[string]interface{}, []component.Component, configInjector = c.monitorMgr.MonitoringConfig } - comps, err := c.specs.ToComponents(cfg, configInjector) + comps, err := c.specs.ToComponents(cfg, configInjector, c.state.logLevel) if err != nil { return nil, nil, fmt.Errorf("failed to render components: %w", err) } @@ -697,9 +781,10 @@ type coordinatorState struct { message string overrideState *coordinatorOverrideState - config *config.Config - ast *transpiler.AST - vars []*transpiler.Vars + config *config.Config + ast *transpiler.AST + vars []*transpiler.Vars + logLevel logp.Level } type coordinatorOverrideState struct { @@ -707,65 +792,17 @@ type coordinatorOverrideState struct { message string } -type coordinatorComponentLogState struct { - ID string `json:"id"` - State string `json:"state"` - Message string `json:"message"` - Inputs []coordinatorComponentUnitLogState `json:"inputs"` - Output coordinatorComponentUnitLogState `json:"output,omitempty"` -} - -type coordinatorComponentUnitLogState struct { - ID string `json:"id"` - State string `json:"state"` - Message string `json:"message"` -} - -func newCoordinatorComponentLogState(state *runtime.ComponentComponentState) coordinatorComponentLogState { - var output coordinatorComponentUnitLogState - inputs := make([]coordinatorComponentUnitLogState, 0, len(state.State.Units)) - for key, unit := range state.State.Units { - if key.UnitType == client.UnitTypeInput { - inputs = append(inputs, coordinatorComponentUnitLogState{ - ID: key.UnitID, - State: newCoordinatorComponentStateStr(unit.State), - Message: unit.Message, - }) - } else { - output = coordinatorComponentUnitLogState{ - ID: key.UnitID, - State: newCoordinatorComponentStateStr(unit.State), - Message: unit.Message, - } - } - } - return coordinatorComponentLogState{ - ID: state.Component.ID, - State: newCoordinatorComponentStateStr(state.State.State), - Message: state.State.Message, - Inputs: inputs, - Output: output, - } +type coordinatorComponentLog struct { + ID string `json:"id"` + State string `json:"state"` + OldState string `json:"old_state,omitempty"` } -func newCoordinatorComponentStateStr(state client.UnitState) string { - switch state { - case client.UnitStateStarting: - return "Starting" - case client.UnitStateConfiguring: - return "Configuring" - case client.UnitStateDegraded: - return "Degraded" - case client.UnitStateHealthy: - return "Healthy" - case client.UnitStateFailed: - return "Failed" - case client.UnitStateStopping: - return "Stopping" - case client.UnitStateStopped: - return "Stopped" - } - return "Unknown" +type coordinatorUnitLog struct { + ID string `json:"id"` + Type string `json:"type"` + State string `json:"state"` + OldState string `json:"old_state,omitempty"` } func hasState(components []runtime.ComponentComponentState, state client.UnitState) bool { @@ -781,3 +818,24 @@ func hasState(components []runtime.ComponentComponentState, state client.UnitSta } return false } + +func logBasedOnState(l *logger.Logger, state client.UnitState, msg string, args ...interface{}) { + switch state { + case client.UnitStateStarting: + l.With(args...).Info(msg) + case client.UnitStateConfiguring: + l.With(args...).Info(msg) + case client.UnitStateDegraded: + l.With(args...).Warn(msg) + case client.UnitStateHealthy: + l.With(args...).Info(msg) + case client.UnitStateFailed: + l.With(args...).Error(msg) + case client.UnitStateStopping: + 
l.With(args...).Info(msg) + case client.UnitStateStopped: + l.With(args...).Info(msg) + default: + l.With(args...).Info(msg) + } +} diff --git a/internal/pkg/agent/application/coordinator/handler.go b/internal/pkg/agent/application/coordinator/handler.go index eba0d830e36..1f1dd446156 100644 --- a/internal/pkg/agent/application/coordinator/handler.go +++ b/internal/pkg/agent/application/coordinator/handler.go @@ -9,7 +9,7 @@ import ( "net/http" "time" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/client" ) // LivenessResponse is the response body for the liveness endpoint. diff --git a/internal/pkg/agent/application/dispatcher/dispatcher.go b/internal/pkg/agent/application/dispatcher/dispatcher.go index e37fbdc770b..a4ec47a96fe 100644 --- a/internal/pkg/agent/application/dispatcher/dispatcher.go +++ b/internal/pkg/agent/application/dispatcher/dispatcher.go @@ -132,6 +132,7 @@ func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker acker.Acker, act strings.Join(detectTypes(actions), ", "), ) + var reportedErr error for _, action := range actions { if err = ctx.Err(); err != nil { ad.errCh <- err @@ -146,14 +147,18 @@ func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker acker.Acker, act continue } ad.log.Debugf("Failed to dispatch action '%+v', error: %+v", action, err) - ad.errCh <- err + reportedErr = err continue } ad.log.Debugf("Successfully dispatched action: '%+v'", action) } if err = acker.Commit(ctx); err != nil { - ad.errCh <- err + reportedErr = err + } + + if len(actions) > 0 { + ad.errCh <- reportedErr } } diff --git a/internal/pkg/agent/application/dispatcher/dispatcher_test.go b/internal/pkg/agent/application/dispatcher/dispatcher_test.go index c9c1397443c..d68e1e508ca 100644 --- a/internal/pkg/agent/application/dispatcher/dispatcher_test.go +++ b/internal/pkg/agent/application/dispatcher/dispatcher_test.go @@ -133,11 +133,11 @@ func TestActionDispatcher(t *testing.T) { success1.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() success2.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() - d.Dispatch(ctx, ack, action1, action2) - select { - case err := <-d.Errors(): + dispatchCtx, cancelFn := context.WithCancel(ctx) + defer cancelFn() + go d.Dispatch(dispatchCtx, ack, action1, action2) + if err := <-d.Errors(); err != nil { t.Fatalf("Unexpected error: %v", err) - default: } success1.AssertExpectations(t) @@ -159,11 +159,12 @@ func TestActionDispatcher(t *testing.T) { action := &mockOtherAction{} action.On("Type").Return("action") action.On("ID").Return("id") - d.Dispatch(ctx, ack, action) - select { - case err := <-d.Errors(): + + dispatchCtx, cancelFn := context.WithCancel(ctx) + defer cancelFn() + go d.Dispatch(dispatchCtx, ack, action) + if err := <-d.Errors(); err != nil { t.Fatalf("Unexpected error: %v", err) - default: } def.AssertExpectations(t) @@ -209,11 +210,11 @@ func TestActionDispatcher(t *testing.T) { action2.On("Type").Return("action") action2.On("ID").Return("id") - d.Dispatch(context.Background(), ack, action1, action2) - select { - case err := <-d.Errors(): + dispatchCtx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + go d.Dispatch(dispatchCtx, ack, action1, action2) + if err := <-d.Errors(); err != nil { t.Fatalf("Unexpected error: %v", err) - default: } def.AssertExpectations(t) queue.AssertExpectations(t) @@ -236,11 +237,14 @@ func TestActionDispatcher(t *testing.T) { 
action.On("Type").Return(fleetapi.ActionTypeCancel) action.On("ID").Return("id") - d.Dispatch(context.Background(), ack, action) + dispatchCtx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + go d.Dispatch(dispatchCtx, ack, action) select { case err := <-d.Errors(): t.Fatalf("Unexpected error: %v", err) - default: + case <-time.After(200 * time.Microsecond): + // we're not expecting any reset, } def.AssertExpectations(t) queue.AssertExpectations(t) @@ -269,11 +273,11 @@ func TestActionDispatcher(t *testing.T) { action2.On("Type").Return(fleetapi.ActionTypeCancel) action2.On("ID").Return("id") - d.Dispatch(context.Background(), ack, action2) - select { - case err := <-d.Errors(): + dispatchCtx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + go d.Dispatch(dispatchCtx, ack, action2) + if err := <-d.Errors(); err != nil { t.Fatalf("Unexpected error: %v", err) - default: } def.AssertExpectations(t) queue.AssertExpectations(t) @@ -292,11 +296,14 @@ func TestActionDispatcher(t *testing.T) { err = d.Register(&mockAction{}, def) require.NoError(t, err) - d.Dispatch(context.Background(), ack) + dispatchCtx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + go d.Dispatch(dispatchCtx, ack) select { case err := <-d.Errors(): t.Fatalf("Unexpected error: %v", err) - default: + case <-time.After(500 * time.Microsecond): + // we're not expecting any reset } def.AssertNotCalled(t, "Handle", mock.Anything, mock.Anything, mock.Anything) }) @@ -324,11 +331,11 @@ func TestActionDispatcher(t *testing.T) { action.On("SetRetryAttempt", 1).Once() action.On("SetStartTime", mock.Anything).Once() - d.Dispatch(context.Background(), ack, action) - select { - case err := <-d.Errors(): + dispatchCtx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + go d.Dispatch(dispatchCtx, ack, action) + if err := <-d.Errors(); err != nil { t.Fatalf("Unexpected error: %v", err) - default: } def.AssertExpectations(t) queue.AssertExpectations(t) @@ -358,7 +365,9 @@ func TestActionDispatcher(t *testing.T) { // Kind of a dirty work around to test an error return. // launch in another routing and sleep to check if an error is generated - go d.Dispatch(context.Background(), ack, action1, action2) + dispatchCtx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + go d.Dispatch(dispatchCtx, ack, action1, action2) time.Sleep(time.Millisecond * 200) select { case <-d.Errors(): @@ -375,6 +384,55 @@ func TestActionDispatcher(t *testing.T) { def.AssertExpectations(t) queue.AssertExpectations(t) }) + + t.Run("Dispatch multiples events in separate batch returns one error second one resets it", func(t *testing.T) { + def := &mockHandler{} + def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(errors.New("test error")).Once() + def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() + + queue := &mockQueue{} + queue.On("Save").Return(nil).Times(2) + queue.On("DequeueActions").Return([]fleetapi.ScheduledAction{}).Times(2) + + d, err := New(nil, def, queue) + require.NoError(t, err) + err = d.Register(&mockAction{}, def) + require.NoError(t, err) + + action1 := &mockAction{} + action1.On("Type").Return("action") + action1.On("ID").Return("id") + action2 := &mockAction{} + action2.On("Type").Return("action") + action2.On("ID").Return("id") + + // Kind of a dirty work around to test an error return. 
+ // launch in another routing and sleep to check if an error is generated + dispatchCtx1, cancelFn1 := context.WithCancel(context.Background()) + defer cancelFn1() + go d.Dispatch(dispatchCtx1, ack, action1) + select { + case err := <-d.Errors(): + if err == nil { + t.Fatal("Expecting error") + } + case <-time.After(300 * time.Millisecond): + } + + dispatchCtx2, cancelFn2 := context.WithCancel(context.Background()) + defer cancelFn2() + go d.Dispatch(dispatchCtx2, ack, action2) + select { + case err := <-d.Errors(): + if err != nil { + t.Fatal("Unexpected error") + } + case <-time.After(300 * time.Millisecond): + } + + def.AssertExpectations(t) + queue.AssertExpectations(t) + }) } func Test_ActionDispatcher_scheduleRetry(t *testing.T) { diff --git a/internal/pkg/agent/application/fleet_server_bootstrap.go b/internal/pkg/agent/application/fleet_server_bootstrap.go index 62106c30aea..d1938c3a2f4 100644 --- a/internal/pkg/agent/application/fleet_server_bootstrap.go +++ b/internal/pkg/agent/application/fleet_server_bootstrap.go @@ -12,6 +12,7 @@ import ( "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/pkg/component" @@ -22,6 +23,7 @@ const ( elasticsearch = "elasticsearch" fleetServer = "fleet-server" endpoint = "endpoint" + apmServer = "apm" ) // injectFleetServerInput is the base configuration that is used plus the FleetServerComponentModifier that adjusts @@ -83,15 +85,20 @@ func FleetServerComponentModifier(serverCfg *configuration.FleetServerConfig) co } } -// EndpointComponentModifier the modifier for the Endpoint configuration. -// The Endpoint expects the fleet configuration passed to it by the Agent -// because it needs to be able to connect to the fleet server directly. -func EndpointComponentModifier(fleetCfg *configuration.FleetAgentConfig) coordinator.ComponentsModifier { +// InjectFleetConfigComponentModifier The modifier that injects the fleet configuration for the components +// that need to be able to connect to fleet server. 
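Stepping back to the ActionDispatcher changes above: Dispatch now reports at most one value on the errors channel per non-empty batch, and a nil value clears an error reported for a previous batch, which is why the tests launch Dispatch in its own goroutine and read Errors() directly. A hedged consumer-side fragment; the acker, actions, and log values are placeholders:

// Sketch of the consumer contract exercised by the tests above.
go d.Dispatch(ctx, acker, actions...)

select {
case err := <-d.Errors():
	if err != nil {
		log.Errorf("failed dispatching actions: %v", err) // last error seen in the batch
	} else {
		// nil means the latest batch succeeded and any earlier reported error is cleared
	}
case <-ctx.Done():
}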
+func InjectFleetConfigComponentModifier(fleetCfg *configuration.FleetAgentConfig, agentInfo *info.AgentInfo) coordinator.ComponentsModifier { return func(comps []component.Component, cfg map[string]interface{}) ([]component.Component, error) { + hostsStr := fleetCfg.Client.GetHosts() + fleetHosts := make([]interface{}, 0, len(hostsStr)) + for _, host := range hostsStr { + fleetHosts = append(fleetHosts, host) + } + for i, comp := range comps { - if comp.InputSpec != nil && comp.InputSpec.InputType == endpoint { + if comp.InputSpec != nil && (comp.InputSpec.InputType == endpoint || comp.InputSpec.InputType == apmServer) { for j, unit := range comp.Units { - if unit.Type == client.UnitTypeInput && unit.Config.Type == endpoint { + if unit.Type == client.UnitTypeInput && (unit.Config.Type == endpoint || unit.Config.Type == apmServer) { unitCfgMap, err := toMapStr(unit.Config.Source.AsMap(), map[string]interface{}{"fleet": fleetCfg}) if err != nil { return nil, err @@ -104,6 +111,10 @@ func EndpointComponentModifier(fleetCfg *configuration.FleetAgentConfig) coordin if v, ok := unitCfgMap["fleet"]; ok { if m, ok := v.(map[string]interface{}); ok { m["host"] = cfg["host"] + m["hosts"] = fleetHosts + + // Inject agent log level + injectAgentLoggingLevel(m, agentInfo) } } unitCfg, err := component.ExpectedConfig(unitCfgMap) @@ -121,6 +132,37 @@ func EndpointComponentModifier(fleetCfg *configuration.FleetAgentConfig) coordin } } +type logLevelProvider interface { + LogLevel() string +} + +func injectAgentLoggingLevel(cfg map[string]interface{}, llp logLevelProvider) { + if cfg == nil || llp == nil { + return + } + + var agentMap, loggingMap map[string]interface{} + if v, ok := cfg["agent"]; ok { + agentMap, _ = v.(map[string]interface{}) + } else { + agentMap = make(map[string]interface{}) + cfg["agent"] = agentMap + } + + if agentMap != nil { + if v, ok := agentMap["logging"]; ok { + loggingMap, _ = v.(map[string]interface{}) + } else { + loggingMap = make(map[string]interface{}) + agentMap["logging"] = loggingMap + } + } + + if loggingMap != nil { + loggingMap["level"] = llp.LogLevel() + } +} + type fleetServerBootstrapManager struct { log *logger.Logger diff --git a/internal/pkg/agent/application/fleet_server_bootstrap_test.go b/internal/pkg/agent/application/fleet_server_bootstrap_test.go index 53fd864fdb6..02a97b7161d 100644 --- a/internal/pkg/agent/application/fleet_server_bootstrap_test.go +++ b/internal/pkg/agent/application/fleet_server_bootstrap_test.go @@ -10,14 +10,88 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" + "google.golang.org/protobuf/types/known/structpb" + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" + "github.com/elastic/elastic-agent/internal/pkg/remote" "github.com/elastic/elastic-agent/internal/pkg/testutils" + "github.com/elastic/elastic-agent/pkg/component" ) +func TestInjectFleetConfigComponentModifier(t *testing.T) { + fleetConfig := &configuration.FleetAgentConfig{ + Enabled: true, + Client: remote.Config{ + Host: "sample.host", + }, + } + + cfg := map[string]interface{}{ + "host": map[string]interface{}{ + "id": "agent-id", + }, + } + + modifier := InjectFleetConfigComponentModifier(fleetConfig, nil) + apmSource, err := 
structpb.NewStruct(map[string]interface{}{ + "sample": "config", + }) + require.NoError(t, err) + + apmComponent := component.Component{ + InputSpec: &component.InputRuntimeSpec{ + InputType: "apm", + }, + Units: []component.Unit{ + { + Type: client.UnitTypeInput, + Config: &proto.UnitExpectedConfig{ + Type: "apm", + Source: apmSource, + }, + }, + }, + } + comps := []component.Component{apmComponent} + resComps, err := modifier(comps, cfg) + require.NoError(t, err) + + require.Equal(t, 1, len(resComps)) + require.Equal(t, 1, len(resComps[0].Units)) + resConfig := resComps[0].Units[0].Config.Source.AsMap() + fleet, ok := resConfig["fleet"] + require.True(t, ok) + + fleetMap, ok := fleet.(map[string]interface{}) + require.True(t, ok) + + hostRaw, found := fleetMap["host"] + require.True(t, found) + + hostsRaw, found := fleetMap["hosts"] + require.True(t, found) + + hostMap, ok := hostRaw.(map[string]interface{}) + require.True(t, ok) + + idRaw, found := hostMap["id"] + require.True(t, found) + require.Equal(t, "agent-id", idRaw.(string)) + + hostsSlice, ok := hostsRaw.([]interface{}) + require.True(t, ok) + require.Equal(t, 1, len(hostsSlice)) + require.Equal(t, "sample.host", hostsSlice[0].(string)) + +} + func TestFleetServerBootstrapManager(t *testing.T) { l := testutils.NewErrorLogger(t) mgr := newFleetServerBootstrapManager(l) @@ -54,3 +128,46 @@ func TestFleetServerBootstrapManager(t *testing.T) { require.NotNil(t, change) assert.NotNil(t, change.Config()) } + +type testLogLevelProvider struct { + logLevel string +} + +func (l *testLogLevelProvider) LogLevel() string { + return l.logLevel +} + +func TestInjectAgentLoggingLevel(t *testing.T) { + tests := []struct { + name string + cfg map[string]interface{} + llp logLevelProvider + res map[string]interface{} + }{ + { + name: "nil", + }, + { + name: "empty", + cfg: map[string]interface{}{}, + llp: &testLogLevelProvider{"debug"}, + res: map[string]interface{}{"agent": map[string]interface{}{"logging": map[string]interface{}{"level": string("debug")}}}, + }, + { + name: "existing agent", + cfg: map[string]interface{}{"agent": map[string]interface{}{"id": "123456"}}, + llp: &testLogLevelProvider{"info"}, + res: map[string]interface{}{"agent": map[string]interface{}{"id": "123456", "logging": map[string]interface{}{"level": string("info")}}}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + injectAgentLoggingLevel(tc.cfg, tc.llp) + diff := cmp.Diff(tc.res, tc.cfg) + if diff != "" { + t.Fatal(diff) + } + }) + } +} diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index 9b31e7dcf12..155ce81c6a3 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -12,7 +12,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - agentclient "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" + agentclient "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/client" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/core/backoff" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" @@ -27,7 +27,7 @@ import ( const maxUnauthCounter int = 6 // Const for decraded state or linter complains 
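Putting the two tests above together, the fleet section injected into a matching unit config ends up with the host block, the resolved hosts list and, when an agent info with a log level is available, the agent logging level. A sketch of that shape using the fixture values from the tests plus a hypothetical level; the fleet section also carries the rest of the serialized fleet settings, omitted here:

// Illustrative only; values mirror the test fixtures above.
injected := map[string]interface{}{
	"sample": "config",
	"fleet": map[string]interface{}{
		"host":  map[string]interface{}{"id": "agent-id"},
		"hosts": []interface{}{"sample.host"},
		"agent": map[string]interface{}{
			"logging": map[string]interface{}{"level": "info"},
		},
	},
}
_ = injected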
-const degraded = "degraded" +const degraded = "DEGRADED" // Default Configuration for the Fleet Gateway. var defaultGatewaySettings = &fleetGatewaySettings{ @@ -234,19 +234,19 @@ func (f *fleetGateway) convertToCheckinComponents(components []runtime.Component stateString := func(s eaclient.UnitState) string { switch s { case eaclient.UnitStateStarting: - return "starting" + return "STARTING" case eaclient.UnitStateConfiguring: - return "configuring" + return "CONFIGURING" case eaclient.UnitStateHealthy: - return "healthy" + return "HEALTHY" case eaclient.UnitStateDegraded: return degraded case eaclient.UnitStateFailed: - return "failed" + return "FAILED" case eaclient.UnitStateStopping: - return "stopping" + return "STOPPING" case eaclient.UnitStateStopped: - return "stopped" + return "STOPPED" } return "" } diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index af53e150888..0526633a088 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -297,8 +297,8 @@ func fleetServerRunning(state runtime.ComponentState) bool { return false } -func (m *managedConfigManager) initDispatcher(canceller context.CancelFunc) *handlers.PolicyChange { - policyChanger := handlers.NewPolicyChange( +func (m *managedConfigManager) initDispatcher(canceller context.CancelFunc) *handlers.PolicyChangeHandler { + policyChanger := handlers.NewPolicyChangeHandler( m.log, m.agentInfo, m.cfg, diff --git a/internal/pkg/agent/application/monitoring/error.go b/internal/pkg/agent/application/monitoring/error.go new file mode 100644 index 00000000000..ebf33d9b7fd --- /dev/null +++ b/internal/pkg/agent/application/monitoring/error.go @@ -0,0 +1,33 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package monitoring + +import "fmt" + +func errorWithStatus(status int, err error) *statusError { + return &statusError{ + err: err, + status: status, + } +} + +func errorfWithStatus(status int, msg string, args ...string) *statusError { + err := fmt.Errorf(msg, args) + return errorWithStatus(status, err) +} + +// StatusError holds correlation between error and a status +type statusError struct { + err error + status int +} + +func (s *statusError) Status() int { + return s.status +} + +func (s *statusError) Error() string { + return s.err.Error() +} diff --git a/internal/pkg/agent/application/monitoring/process.go b/internal/pkg/agent/application/monitoring/process.go new file mode 100644 index 00000000000..f3b8f6241d5 --- /dev/null +++ b/internal/pkg/agent/application/monitoring/process.go @@ -0,0 +1,180 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
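The new error.go above deliberately keeps the HTTP status alongside the error. The wrapper that turns these handlers into http.Handler values is not part of this hunk; a minimal sketch of how such a wrapper could map the status via an interface assertion (uses net/http and errors), assuming only the Status() method defined above:

// Sketch only; createHandler in the agent may differ.
type statusCoder interface{ Status() int }

func wrapHandler(fn func(http.ResponseWriter, *http.Request) error) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if err := fn(w, r); err != nil {
			code := http.StatusInternalServerError
			var sc statusCoder
			if errors.As(err, &sc) {
				code = sc.Status()
			}
			http.Error(w, err.Error(), code)
		}
	})
}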
+ +package monitoring + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "syscall" + "time" + + "github.com/gorilla/mux" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" +) + +const ( + componentIDKey = "componentID" + metricsPathKey = "metricsPath" + timeout = 10 * time.Second + apmPrefix = "apm-server" + apmTypePrefix = "apm" + fleetServerPrefix = "fleet-server" +) + +var redirectPathAllowlist = map[string]struct{}{ + "": {}, + "stats": {}, + "state": {}, +} + +var redirectableProcesses = []string{ + apmTypePrefix, + fleetServerPrefix, +} + +func processHandler(coord *coordinator.Coordinator, statsHandler func(http.ResponseWriter, *http.Request) error, operatingSystem string) func(http.ResponseWriter, *http.Request) error { + return func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + + vars := mux.Vars(r) + + componentID, found := vars[componentIDKey] + if !found { + return errorfWithStatus(http.StatusNotFound, "process with specified ID not found") + } + + if componentID == "" || componentID == paths.BinaryName { + // proxy stats for elastic agent process + return statsHandler(w, r) + } + + componentID = cloudComponentIDToAgentInputType(componentID) + + if isProcessRedirectable(componentID) { + // special handling for redirectable processes + // apm needs its own output even for no path + metricsPath := vars[metricsPathKey] + _, ok := redirectPathAllowlist[metricsPath] + if !ok { + return errorfWithStatus(http.StatusNotFound, "process specified does not expose metrics") + } + + if strings.HasPrefix(componentID, fleetServerPrefix) && metricsPathKey == "" { + // special case, fleet server is expected to return stats right away + // removing this would be breaking + metricsPath = "stats" + } + + return redirectToPath(w, r, componentID, metricsPath, operatingSystem) + } + + state := coord.State(false) + + for _, c := range state.Components { + if matchesCloudProcessID(&c.Component, componentID) { + data := struct { + State string `json:"state"` + Message string `json:"message"` + }{ + State: c.State.State.String(), + Message: c.State.Message, + } + + bytes, err := json.Marshal(data) + var content string + if err != nil { + content = fmt.Sprintf("Not valid json: %v", err) + } else { + content = string(bytes) + } + fmt.Fprint(w, content) + + return nil + } + } + + return errorWithStatus(http.StatusNotFound, fmt.Errorf("matching component %v not found", componentID)) + } +} + +func isProcessRedirectable(componentID string) bool { + processNameLower := strings.ToLower(componentID) + for _, prefix := range redirectableProcesses { + if strings.HasPrefix(processNameLower, prefix) { + return true + } + } + return false +} + +func redirectToPath(w http.ResponseWriter, r *http.Request, id, path, operatingSystem string) error { + endpoint := prefixedEndpoint(endpointPath(id, operatingSystem)) + metricsBytes, statusCode, metricsErr := processMetrics(r.Context(), endpoint, path) + if metricsErr != nil { + return metricsErr + } + + if statusCode > 0 { + w.WriteHeader(statusCode) + } + + fmt.Fprint(w, string(metricsBytes)) + return nil +} + +func processMetrics(ctx context.Context, endpoint, path string) ([]byte, int, error) { + hostData, err := parseURL(endpoint, "http", "", "", path, "") + if err != nil { + return nil, 0, 
errorWithStatus(http.StatusInternalServerError, err) + } + + dialer, err := hostData.transport.Make(timeout) + if err != nil { + return nil, 0, errorWithStatus(http.StatusInternalServerError, err) + } + + client := http.Client{ + Timeout: timeout, + Transport: &http.Transport{ + Dial: dialer.Dial, + }, + } + + req, err := http.NewRequest(http.MethodGet, hostData.uri, nil) + if err != nil { + return nil, 0, errorWithStatus( + http.StatusInternalServerError, + fmt.Errorf("fetching metrics failed: %w", err), + ) + } + + req.Close = true + cctx, cancelFn := context.WithCancel(ctx) + defer cancelFn() + + resp, err := client.Do(req.WithContext(cctx)) + if err != nil { + statusCode := http.StatusInternalServerError + if errors.Is(err, syscall.ENOENT) { + statusCode = http.StatusNotFound + } + return nil, 0, errorWithStatus(statusCode, err) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, 0, errorWithStatus(http.StatusInternalServerError, err) + } + + return rb, resp.StatusCode, nil +} diff --git a/internal/pkg/agent/application/monitoring/processes.go b/internal/pkg/agent/application/monitoring/processes.go new file mode 100644 index 00000000000..7066bd997bc --- /dev/null +++ b/internal/pkg/agent/application/monitoring/processes.go @@ -0,0 +1,78 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package monitoring + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" +) + +type source struct { + Kind string `json:"kind"` + Outputs []string `json:"outputs"` +} + +type process struct { + ID string `json:"id"` + PID string `json:"pid,omitempty"` + Binary string `json:"binary"` + Source source `json:"source"` +} + +func sourceFromComponentID(procID string) source { + var s source + var out string + if pos := strings.LastIndex(procID, "-"); pos != -1 { + out = procID[pos+1:] + } + if strings.HasSuffix(out, "monitoring") { + s.Kind = "internal" + } else { + s.Kind = "configured" + } + s.Outputs = []string{out} + return s +} + +func processesHandler(coord *coordinator.Coordinator) func(http.ResponseWriter, *http.Request) error { + return func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + + procs := make([]process, 0) + + state := coord.State(false) + + for _, c := range state.Components { + if c.Component.InputSpec != nil { + procs = append(procs, process{ + ID: expectedCloudProcessID(&c.Component), + PID: c.LegacyPID, + Binary: c.Component.InputSpec.BinaryName, + Source: sourceFromComponentID(c.Component.ID), + }) + } + } + data := struct { + Processes []process `json:"processes"` + }{ + Processes: procs, + } + + bytes, err := json.Marshal(data) + var content string + if err != nil { + content = fmt.Sprintf("Not valid json: %v", err) + } else { + content = string(bytes) + } + fmt.Fprint(w, content) + + return nil + } +} diff --git a/internal/pkg/agent/application/monitoring/processes_cloud.go b/internal/pkg/agent/application/monitoring/processes_cloud.go new file mode 100644 index 00000000000..c4ba36a1d35 --- /dev/null +++ b/internal/pkg/agent/application/monitoring/processes_cloud.go @@ -0,0 +1,45 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package monitoring + +import ( + "strings" + + "github.com/elastic/elastic-agent/pkg/component" +) + +func cloudComponentIDToAgentInputType(componentID string) string { + if strings.HasPrefix(componentID, apmPrefix) { + // from binary name back to input type, keep the output name as is (apm-default) + return strings.Replace(componentID, apmPrefix, apmTypePrefix, 1) + } + return componentID +} + +func expectedCloudProcessID(c *component.Component) string { + // Cloud explicitly looks for an ID of "apm-server" to determine if APM is in managed mode. + // Ensure that this is the ID we use, in agent v2 the ID is usually "apm-default". + // Otherwise apm-server won't be routable/accessible in cloud. + // https://github.com/elastic/elastic-agent/issues/1731#issuecomment-1325862913 + if strings.Contains(c.InputSpec.BinaryName, "apm-server") { + // cloud understands `apm-server-default` and does not understand `apm-default` + return strings.Replace(c.ID, "apm-", "apm-server-", 1) + } + + return c.ID +} + +func matchesCloudProcessID(c *component.Component, id string) bool { + // Similar to the case above, cloud currently makes a call to /processes/apm-server + // to find the APM server address. Rather than change all of the monitoring in cloud, + // it is easier to just make sure the existing ID maps to the APM server component. + if strings.Contains(id, "apm-server") { + if strings.Contains(c.InputSpec.BinaryName, "apm-server") { + return true + } + } + + return id == c.ID +} diff --git a/internal/pkg/agent/application/monitoring/processes_cloud_test.go b/internal/pkg/agent/application/monitoring/processes_cloud_test.go new file mode 100644 index 00000000000..31553978075 --- /dev/null +++ b/internal/pkg/agent/application/monitoring/processes_cloud_test.go @@ -0,0 +1,149 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package monitoring + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/elastic-agent/pkg/component" +) + +func TestCloudComponentIDToAgentInputType(t *testing.T) { + testcases := []struct { + name string + componentID string + expectedID string + }{ + { + "apm server", + "apm-server-default", + "apm-default", + }, + { + "not apm", + "filestream-default", + "filestream-default", + }, + { + "almost apm", + "apm-java-attacher-default", + "apm-java-attacher-default", + }, + { + "apm in output name", + "endpoint-apm-output", + "endpoint-apm-output", + }, + { + "apm-server in output name", + "endpoint-apm-server-output", + "endpoint-apm-server-output", + }, + { + "apm-server everywhere", + "apm-server-with-apm-server-output", + "apm-with-apm-server-output", + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expectedID, cloudComponentIDToAgentInputType(tc.componentID)) + }) + } +} + +func TestExpectedCloudProcessID(t *testing.T) { + testcases := []struct { + name string + component component.Component + id string + }{ + { + "APM", + component.Component{ + ID: "apm-default", + InputSpec: &component.InputRuntimeSpec{BinaryName: "apm-server"}, + }, + "apm-server-default", + }, + { + "NotAPM", + component.Component{ + ID: "filestream-default", + InputSpec: &component.InputRuntimeSpec{BinaryName: "filebeat"}, + }, + "filestream-default", + }, + { + "AlmostAPM", + component.Component{ + ID: "apm-java-attacher-default", + InputSpec: &component.InputRuntimeSpec{BinaryName: "apm-java-attacher"}, + }, + "apm-java-attacher-default", + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.id, expectedCloudProcessID(&tc.component)) + }) + } +} + +func TestMatchesCloudProcessID(t *testing.T) { + testcases := []struct { + name string + processID string + component component.Component + matches bool + }{ + { + "MatchesAPMServer", + "apm-server", + component.Component{ + ID: "apm-default", + InputSpec: &component.InputRuntimeSpec{BinaryName: "apm-server"}, + }, + true, + }, + { + "MatchesAPMDefault", + "apm-default", + component.Component{ + ID: "apm-default", + InputSpec: &component.InputRuntimeSpec{BinaryName: "apm-server"}, + }, + true, + }, + { + "MatchesFilestream", + "filestream-default", + component.Component{ + ID: "filestream-default", + InputSpec: &component.InputRuntimeSpec{BinaryName: "filebeat"}, + }, + true, + }, + { + "DoesNotMatch", + "filestream-default", + component.Component{ + ID: "metricbeat-default", + InputSpec: &component.InputRuntimeSpec{BinaryName: "metricbeat"}, + }, + false, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.matches, matchesCloudProcessID(&tc.component, tc.processID)) + }) + } +} diff --git a/internal/pkg/agent/application/monitoring/server.go b/internal/pkg/agent/application/monitoring/server.go index ef5a26df9d2..390a472d5ed 100644 --- a/internal/pkg/agent/application/monitoring/server.go +++ b/internal/pkg/agent/application/monitoring/server.go @@ -6,6 +6,7 @@ package monitoring import ( "net/http" + "net/url" "os" "path/filepath" "runtime" @@ -18,6 +19,7 @@ import ( "github.com/elastic/elastic-agent-libs/api" "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/monitoring" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -27,6 +29,9 @@ func NewServer( 
endpointConfig api.Config, ns func(string) *monitoring.Namespace, tracer *apm.Tracer, + coord *coordinator.Coordinator, + enableProcessStats bool, + operatingSystem string, ) (*api.Server, error) { if err := createAgentMonitoringDrop(endpointConfig.Host); err != nil { // log but ignore @@ -38,7 +43,7 @@ func NewServer( return nil, err } - return exposeMetricsEndpoint(log, cfg, ns, tracer) + return exposeMetricsEndpoint(log, cfg, ns, tracer, coord, enableProcessStats, operatingSystem) } func exposeMetricsEndpoint( @@ -46,6 +51,9 @@ func exposeMetricsEndpoint( config *config.C, ns func(string) *monitoring.Namespace, tracer *apm.Tracer, + coord *coordinator.Coordinator, + enableProcessStats bool, + operatingSystem string, ) (*api.Server, error) { r := mux.NewRouter() if tracer != nil { @@ -54,6 +62,13 @@ func exposeMetricsEndpoint( statsHandler := statsHandler(ns("stats")) r.Handle("/stats", createHandler(statsHandler)) + if enableProcessStats { + r.Handle("/processes", createHandler(processesHandler(coord))) + r.Handle("/processes/{componentID}", createHandler(processHandler(coord, statsHandler, operatingSystem))) + r.Handle("/processes/{componentID}/", createHandler(processHandler(coord, statsHandler, operatingSystem))) + r.Handle("/processes/{componentID}/{metricsPath}", createHandler(processHandler(coord, statsHandler, operatingSystem))) + } + mux := http.NewServeMux() mux.Handle("/", r) @@ -61,7 +76,7 @@ func exposeMetricsEndpoint( } func createAgentMonitoringDrop(drop string) error { - if drop == "" || runtime.GOOS == "windows" { + if drop == "" || runtime.GOOS == "windows" || isHttpUrl(drop) { return nil } @@ -84,3 +99,8 @@ func createAgentMonitoringDrop(drop string) error { return os.Chown(path, os.Geteuid(), os.Getegid()) } + +func isHttpUrl(s string) bool { + u, err := url.Parse(strings.TrimSpace(s)) + return err == nil && (u.Scheme == "http" || u.Scheme == "https") && u.Host != "" +} diff --git a/internal/pkg/agent/application/monitoring/server_test.go b/internal/pkg/agent/application/monitoring/server_test.go new file mode 100644 index 00000000000..3c2f1c5aa92 --- /dev/null +++ b/internal/pkg/agent/application/monitoring/server_test.go @@ -0,0 +1,65 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package monitoring + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestIsHTTPUrl(t *testing.T) { + + tests := []struct { + name string + s string + res bool + }{ + { + name: "empty", + }, + { + name: "/", + s: "/", + }, + { + name: "relative", + s: "foo/bar", + }, + { + name: "absolute", + s: "/foo/bar", + }, + { + name: "file", + s: "file://foo/bar", + }, + { + name: "http", + s: "http://localhost:5691", + res: true, + }, + { + name: "https", + s: "https://localhost:5691", + res: true, + }, + { + name: "http space prefix", + s: " http://localhost:5691", + res: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + res := isHttpUrl(tc.s) + diff := cmp.Diff(tc.res, res) + if diff != "" { + t.Error(diff) + } + }) + } +} diff --git a/internal/pkg/agent/application/monitoring/url.go b/internal/pkg/agent/application/monitoring/url.go new file mode 100644 index 00000000000..d5a17441d7e --- /dev/null +++ b/internal/pkg/agent/application/monitoring/url.go @@ -0,0 +1,207 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package monitoring + +import ( + "fmt" + "net" + "net/url" + "strings" + + "github.com/elastic/elastic-agent-libs/transport/dialer" +) + +type hostData struct { + transport dialer.Builder // The transport builder to use when creating the connection. + + uri string // The full URI that should be used in connections. + sanitizedURI string // A sanitized version of the URI without credentials. + + // Parts of the URI. + host string // The host and possibly port. + user string // Username + password string // Password +} + +// ParseURL returns hostData object from a raw 'host' value and a series of +// defaults that are added to the URL if not present in the rawHost value. +// Values from the rawHost take precedence over the defaults. +func parseURL(rawHost, scheme, user, pass, path, query string) (hostData, error) { + u, transport, err := getURL(rawHost, scheme, user, pass, path, query) + if err != nil { + return hostData{}, err + } + + return newHostDataFromURLWithTransport(transport, u), nil +} + +// NewHostDataFromURLWithTransport Allow to specify what kind of transport to in conjunction of the +// url, this is useful if you use a combined scheme like "http+unix://" or "http+npipe". +func newHostDataFromURLWithTransport(transport dialer.Builder, u *url.URL) hostData { + var user, pass string + if u.User != nil { + user = u.User.Username() + pass, _ = u.User.Password() + } + + host := u.Host + if strings.HasSuffix(u.Scheme, "unix") || strings.HasSuffix(u.Scheme, "npipe") { + host = u.Path + } + + return hostData{ + transport: transport, + uri: u.String(), + sanitizedURI: redactURLCredentials(u).String(), + host: host, + user: user, + password: pass, + } +} + +// getURL constructs a URL from the rawHost value and adds the provided user, +// password, path, and query params if one was not set in the rawURL value. +func getURL( + rawURL, scheme, username, password, path, query string, +) (*url.URL, dialer.Builder, error) { + + if parts := strings.SplitN(rawURL, "://", 2); len(parts) != 2 { + // Add scheme. + rawURL = fmt.Sprintf("%s://%s", scheme, rawURL) + } + + var t dialer.Builder + + u, err := url.Parse(rawURL) + if err != nil { + return nil, t, fmt.Errorf("error parsing URL: %w", err) + } + + // discover the transport to use to communicate with the host if we have a combined scheme. + // possible values are mb.TransportTCP, mb.transportUnix or mb.TransportNpipe. 
+ switch u.Scheme { + case "http+unix": + t = dialer.NewUnixDialerBuilder(u.Path) + u.Path = "" + u.Scheme = "http" //nolint:goconst // it's not worth making it const, name of http will not change + u.Host = "unix" + case "http+npipe": + p := u.Path + u.Path = "" + u.Scheme = "http" + u.Host = "npipe" + + if p == "" && u.Host != "" { + p = u.Host + } + + // cleanup of all possible prefixes + p = strings.TrimPrefix(p, "/pipe") + p = strings.TrimPrefix(p, `\\.\pipe`) + p = strings.TrimPrefix(p, "\\") + p = strings.TrimPrefix(p, "/") + + segs := strings.SplitAfterN(p, "/", 2) + if len(segs) == 2 { + p = strings.TrimSuffix(segs[0], "/") + u.Path = "/" + segs[1] + } + + p = `\\.\pipe\` + strings.Replace(p, "/", "\\", -1) + t = dialer.NewNpipeDialerBuilder(p) + default: + t = dialer.NewDefaultDialerBuilder() + } + + setURLUser(u, username, password) + + if !strings.HasSuffix(u.Scheme, "unix") && !strings.HasSuffix(u.Scheme, "npipe") { + if u.Host == "" { + return nil, t, fmt.Errorf("error parsing URL: empty host") + } + + // Validate the host. The port is optional. + host, _, err := net.SplitHostPort(u.Host) + if err != nil { + if strings.Contains(err.Error(), "missing port") { + host = u.Host + } else { + return nil, t, fmt.Errorf("error parsing URL: %w", err) + } + } + if host == "" { + return nil, t, fmt.Errorf("error parsing URL: empty host") + } + } + + if u.Path == "" && path != "" { + // The path given in the host config takes precedence over the + // default path. + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + u.Path = path + } + + // Adds the query params in the url + u, err = setQueryParams(u, query) + return u, t, err +} + +// setURLUser set the user credentials in the given URL. If the username or +// password is not set in the URL then the default is used (if provided). +func setURLUser(u *url.URL, defaultUser, defaultPass string) { + var user, pass string + var userIsSet, passIsSet bool + if u.User != nil { + user = u.User.Username() + if user != "" { + userIsSet = true + } + pass, passIsSet = u.User.Password() + } + + if !userIsSet && defaultUser != "" { + userIsSet = true + user = defaultUser + } + + if !passIsSet && defaultPass != "" { + passIsSet = true + pass = defaultPass + } + + if passIsSet { + u.User = url.UserPassword(user, pass) + } else if userIsSet { + u.User = url.User(user) + } +} + +// setQueryParams adds the query params to existing query parameters overwriting any +// keys that already exist. +func setQueryParams(u *url.URL, query string) (*url.URL, error) { + q := u.Query() + params, err := url.ParseQuery(query) + if err != nil { + return u, err + } + for key, values := range params { + for _, v := range values { + q.Set(key, v) + } + } + u.RawQuery = q.Encode() + return u, nil + +} + +// redactURLCredentials returns the URL as a string with the username and +// password redacted. 
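Worked expectations for the URL helpers above, derived from reading getURL rather than from tests in this change:

// parseURL("localhost:5066", "http", "", "", "stats", "")
//   -> uri "http://localhost:5066/stats", default TCP dialer
//
// parseURL("http+unix:///var/run/agent.sock", "http", "", "", "stats", "")
//   -> uri "http://unix/stats", unix-socket dialer bound to /var/run/agent.sock
//
// parseURL("http://user:pass@localhost:5066", "http", "", "", "", "")
//   -> uri keeps the credentials, sanitizedURI has them stripped by redactURLCredentials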
+func redactURLCredentials(u *url.URL) *url.URL { + redacted := *u + redacted.User = nil + return &redacted +} diff --git a/internal/pkg/agent/application/monitoring/v1_monitor.go b/internal/pkg/agent/application/monitoring/v1_monitor.go index 1d8f2750afd..a70ba023566 100644 --- a/internal/pkg/agent/application/monitoring/v1_monitor.go +++ b/internal/pkg/agent/application/monitoring/v1_monitor.go @@ -14,6 +14,8 @@ import ( "strings" "unicode" + "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" @@ -54,11 +56,12 @@ const ( ) var ( - supportedComponents = []string{"filebeat", "metricbeat", "apm-server", "auditbeat", "cloudbeat", "endpoint-security", "fleet-server", "heartbeat", "osquerybeat", "packetbeat"} - supportedBeatsComponents = []string{"filebeat", "metricbeat", "auditbeat", "cloudbeat", "heartbeat", "osquerybeat", "packetbeat"} + errNoOuputPresent = errors.New("outputs not part of the config") + supportedMetricsComponents = []string{"filebeat", "metricbeat", "apm-server", "auditbeat", "cloudbeat", "cloud-defend", "fleet-server", "heartbeat", "osquerybeat", "packetbeat"} + supportedBeatsComponents = []string{"filebeat", "metricbeat", "apm-server", "fleet-server", "auditbeat", "cloudbeat", "heartbeat", "osquerybeat", "packetbeat"} ) -// Beats monitor is providing V1 monitoring support. +// BeatsMonitor is providing V1 monitoring support for metrics and logs for endpoint-security only. type BeatsMonitor struct { enabled bool // feature flag disabling whole v1 monitoring story config *monitoringConfig @@ -89,7 +92,8 @@ func (b *BeatsMonitor) Enabled() bool { // Reload refreshes monitoring configuration. func (b *BeatsMonitor) Reload(rawConfig *config.Config) error { - if !b.Enabled() { + if !b.enabled { + // it's disabled regardless of config return nil } @@ -100,7 +104,7 @@ func (b *BeatsMonitor) Reload(rawConfig *config.Config) error { } // MonitoringConfig adds monitoring inputs to a configuration based on retrieved list of components to run. 
-func (b *BeatsMonitor) MonitoringConfig(policy map[string]interface{}, componentIDToBinary map[string]string) (map[string]interface{}, error) { +func (b *BeatsMonitor) MonitoringConfig(policy map[string]interface{}, components []component.Component, componentIDToBinary map[string]string) (map[string]interface{}, error) { if !b.Enabled() { return nil, nil } @@ -124,15 +128,18 @@ func (b *BeatsMonitor) MonitoringConfig(policy map[string]interface{}, component cfg := make(map[string]interface{}) - if err := b.injectMonitoringOutput(policy, cfg, monitoringOutputName); err != nil { + if err := b.injectMonitoringOutput(policy, cfg, monitoringOutputName); err != nil && !errors.Is(err, errNoOuputPresent) { return nil, errors.New(err, "failed to inject monitoring output") + } else if errors.Is(err, errNoOuputPresent) { + // nothing to inject, no monitoring output + return nil, nil } // initializes inputs collection so injectors don't have to deal with it b.initInputs(cfg) if b.config.C.MonitorLogs { - if err := b.injectLogsInput(cfg, componentIDToBinary, monitoringOutput); err != nil { + if err := b.injectLogsInput(cfg, components, monitoringOutput); err != nil { return nil, errors.New(err, "failed to inject monitoring output") } } @@ -178,39 +185,28 @@ func (b *BeatsMonitor) EnrichArgs(unit, binary string, args []string) []string { } } - loggingPath := loggingPath(unit, b.operatingSystem) - if loggingPath != "" { + if !b.config.C.LogMetrics { appendix = append(appendix, - "-E", "logging.files.path="+filepath.Dir(loggingPath), - "-E", "logging.files.name="+filepath.Base(loggingPath), - "-E", "logging.files.keepfiles=7", - "-E", "logging.files.permission=0640", - "-E", "logging.files.interval=1h", + "-E", "logging.metrics.enabled=false", ) - - if !b.config.C.LogMetrics { - appendix = append(appendix, - "-E", "logging.metrics.enabled=false", - ) - } } return append(args, appendix...) 
} // Prepare executes steps in order for monitoring to work correctly -func (b *BeatsMonitor) Prepare() error { +func (b *BeatsMonitor) Prepare(unit string) error { if !b.Enabled() { return nil } drops := make([]string, 0, 2) if b.config.C.MonitorLogs { - logsDrop := loggingPath("unit", b.operatingSystem) + logsDrop := loggingPath(unit, b.operatingSystem) drops = append(drops, filepath.Dir(logsDrop)) } if b.config.C.MonitorMetrics { - metricsDrop := monitoringDrop(endpointPath("unit", b.operatingSystem)) + metricsDrop := monitoringDrop(endpointPath(unit, b.operatingSystem)) drops = append(drops, metricsDrop) } @@ -267,7 +263,7 @@ func (b *BeatsMonitor) initInputs(cfg map[string]interface{}) { func (b *BeatsMonitor) injectMonitoringOutput(source, dest map[string]interface{}, monitoringOutputName string) error { outputsNode, found := source[outputsKey] if !found { - return fmt.Errorf("outputs not part of the config") + return errNoOuputPresent } outputs, ok := outputsNode.(map[string]interface{}) @@ -289,24 +285,23 @@ func (b *BeatsMonitor) injectMonitoringOutput(source, dest map[string]interface{ return nil } -func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDToBinary map[string]string, monitoringOutput string) error { +func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, components []component.Component, monitoringOutput string) error { monitoringNamespace := b.monitoringNamespace() - //fixedAgentName := strings.ReplaceAll(agentName, "-", "_") logsDrop := filepath.Dir(loggingPath("unit", b.operatingSystem)) streams := []interface{}{ map[string]interface{}{ - idKey: "logs-monitoring-agent", + idKey: "filestream-monitoring-agent", + "type": "filestream", + "paths": []interface{}{ + filepath.Join(logsDrop, agentName+"-*.ndjson"), + filepath.Join(logsDrop, agentName+"-watcher-*.ndjson"), + }, "data_stream": map[string]interface{}{ "type": "logs", "dataset": "elastic_agent", "namespace": monitoringNamespace, }, - "paths": []interface{}{ - filepath.Join(logsDrop, agentName+"-*.ndjson"), - filepath.Join(logsDrop, agentName+"-watcher-*.ndjson"), - }, - "index": fmt.Sprintf("logs-elastic_agent-%s", monitoringNamespace), "close": map[string]interface{}{ "on_state_change": map[string]interface{}{ "inactive": "5m", @@ -315,75 +310,145 @@ func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDTo "parsers": []interface{}{ map[string]interface{}{ "ndjson": map[string]interface{}{ - "overwrite_keys": true, "message_key": "message", + "overwrite_keys": true, + "add_error_key": true, + "target": "", }, }, }, "processors": []interface{}{ + // drop all events from monitoring components (do it early) + // without dropping these events the filestream gets stuck in an infinite loop + // if filestream hits an issue publishing the events it logs an error which then filestream monitor + // will read from the logs and try to also publish that new log message (thus the infinite loop) map[string]interface{}{ - "add_fields": map[string]interface{}{ - "target": "data_stream", - "fields": map[string]interface{}{ - "type": "logs", - "dataset": "elastic_agent", - "namespace": monitoringNamespace, + "drop_event": map[string]interface{}{ + "when": map[string]interface{}{ + "or": []interface{}{ + map[string]interface{}{ + "equals": map[string]interface{}{ + "component.dataset": fmt.Sprintf("elastic_agent.filestream_%s", monitoringOutput), + }, + }, + // for consistency this monitor is also not shipped (fetch-able with diagnostics) + map[string]interface{}{ + 
"equals": map[string]interface{}{ + "component.dataset": fmt.Sprintf("elastic_agent.beats_metrics_%s", monitoringOutput), + }, + }, + // for consistency with this monitor is also not shipped (fetch-able with diagnostics) + map[string]interface{}{ + "equals": map[string]interface{}{ + "component.dataset": fmt.Sprintf("elastic_agent.http_metrics_%s", monitoringOutput), + }, + }, + }, }, }, }, + // copy original dataset so we can drop the dataset field map[string]interface{}{ - "add_fields": map[string]interface{}{ - "target": "event", - "fields": map[string]interface{}{ - "dataset": "elastic_agent", + "copy_fields": map[string]interface{}{ + "fields": []interface{}{ + map[string]interface{}{ + "from": "data_stream.dataset", + "to": "data_stream.dataset_original", + }, }, }, }, + // drop the dataset field so following copy_field can copy to it map[string]interface{}{ - "add_fields": map[string]interface{}{ - "target": "elastic_agent", - "fields": map[string]interface{}{ - "id": b.agentInfo.AgentID(), - "version": b.agentInfo.Version(), - "snapshot": b.agentInfo.Snapshot(), + "drop_fields": map[string]interface{}{ + "fields": []interface{}{ + "data_stream.dataset", }, }, }, + // copy component.dataset as the real dataset map[string]interface{}{ - "add_fields": map[string]interface{}{ - "target": "agent", - "fields": map[string]interface{}{ - "id": b.agentInfo.AgentID(), + "copy_fields": map[string]interface{}{ + "fields": []interface{}{ + map[string]interface{}{ + "from": "component.dataset", + "to": "data_stream.dataset", + }, + }, + "fail_on_error": false, + "ignore_missing": true, + }, + }, + // possible it's a log message from agent itself (doesn't have component.dataset) + map[string]interface{}{ + "copy_fields": map[string]interface{}{ + "fields": []interface{}{ + map[string]interface{}{ + "from": "data_stream.dataset_original", + "to": "data_stream.dataset", + }, + }, + "fail_on_error": false, + }, + }, + // drop the original dataset copied and the event.dataset (as it will be updated) + map[string]interface{}{ + "drop_fields": map[string]interface{}{ + "fields": []interface{}{ + "data_stream.dataset_original", + "event.dataset", + }, + }, + }, + // update event.dataset with the now used data_stream.dataset + map[string]interface{}{ + "copy_fields": map[string]interface{}{ + "fields": []interface{}{ + map[string]interface{}{ + "from": "data_stream.dataset", + "to": "event.dataset", + }, }, }, }, + // coming from logger, added by agent (drop) map[string]interface{}{ "drop_fields": map[string]interface{}{ "fields": []interface{}{ - "ecs.version", //coming from logger, already added by libbeat + "ecs.version", }, "ignore_missing": true, }, + }, + // adjust destination data_stream based on the data_stream fields + map[string]interface{}{ + "add_formatted_index": map[string]interface{}{ + "index": "%{[data_stream.type]}-%{[data_stream.dataset]}-%{[data_stream.namespace]}", + }, }}, }, } - for unit, binaryName := range componentIDToBinary { - if !isSupportedBinary(binaryName) { + + // service components that define a log path are monitored using its own stream in the monitor + for _, comp := range components { + if comp.InputSpec == nil || comp.InputSpec.Spec.Service == nil || comp.InputSpec.Spec.Service.Log == nil || comp.InputSpec.Spec.Service.Log.Path == "" { + // only monitor service inputs that define a log path continue } - fixedBinaryName := strings.ReplaceAll(binaryName, "-", "_") - name := strings.ReplaceAll(unit, "-", "_") // conform with index naming policy - logFile := 
loggingPath(unit, b.operatingSystem) + fixedBinaryName := strings.ReplaceAll(strings.ReplaceAll(comp.InputSpec.BinaryName, "-", "_"), "/", "_") // conform with index naming policy + dataset := fmt.Sprintf("elastic_agent.%s", fixedBinaryName) streams = append(streams, map[string]interface{}{ - idKey: "logs-monitoring-" + name, + idKey: fmt.Sprintf("filestream-monitoring-%s", comp.ID), + "type": "filestream", + "paths": []interface{}{ + comp.InputSpec.Spec.Service.Log.Path, + }, "data_stream": map[string]interface{}{ "type": "logs", - "dataset": fmt.Sprintf("elastic_agent.%s", fixedBinaryName), + "dataset": dataset, "namespace": monitoringNamespace, }, - "index": fmt.Sprintf("logs-elastic_agent.%s-%s", fixedBinaryName, monitoringNamespace), - "paths": []interface{}{logFile, logFile + "*"}, "close": map[string]interface{}{ "on_state_change": map[string]interface{}{ "inactive": "5m", @@ -392,70 +457,37 @@ func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDTo "parsers": []interface{}{ map[string]interface{}{ "ndjson": map[string]interface{}{ - "overwrite_keys": true, "message_key": "message", + "overwrite_keys": true, + "add_error_key": true, + "target": "", }, }, }, "processors": []interface{}{ map[string]interface{}{ + // component information must be injected because it's not a subprocess "add_fields": map[string]interface{}{ - "target": "data_stream", - "fields": map[string]interface{}{ - "type": "logs", - "dataset": fmt.Sprintf("elastic_agent.%s", fixedBinaryName), - "namespace": monitoringNamespace, - }, - }, - }, - map[string]interface{}{ - "add_fields": map[string]interface{}{ - "target": "event", - "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic_agent.%s", fixedBinaryName), - }, - }, - }, - map[string]interface{}{ - "add_fields": map[string]interface{}{ - "target": "elastic_agent", + "target": "component", "fields": map[string]interface{}{ - "id": b.agentInfo.AgentID(), - "version": b.agentInfo.Version(), - "snapshot": b.agentInfo.Snapshot(), - }, - }, - }, - map[string]interface{}{ - "add_fields": map[string]interface{}{ - "target": "agent", - "fields": map[string]interface{}{ - "id": b.agentInfo.AgentID(), + "id": comp.ID, + "type": comp.InputSpec.InputType, + "binary": comp.InputSpec.BinaryName, + "dataset": dataset, }, }, }, - map[string]interface{}{ - "drop_fields": map[string]interface{}{ - "fields": []interface{}{ - "ecs.version", //coming from logger, already added by libbeat - }, - "ignore_missing": true, - }, - }, }, }) } inputs := []interface{}{ map[string]interface{}{ - idKey: "logs-monitoring-agent", - "name": "logs-monitoring-agent", + idKey: "filestream-monitoring-agent", + "name": "filestream-monitoring-agent", "type": "filestream", useOutputKey: monitoringOutput, - "data_stream": map[string]interface{}{ - "namespace": monitoringNamespace, - }, - "streams": streams, + "streams": streams, }, } inputsNode, found := cfg[inputsKey] @@ -554,12 +586,12 @@ func (b *BeatsMonitor) injectMetricsInput(cfg map[string]interface{}, componentI }, } for unit, binaryName := range componentIDToBinary { - if !isSupportedBinary(binaryName) { + if !isSupportedMetricsBinary(binaryName) { continue } endpoints := []interface{}{prefixedEndpoint(endpointPath(unit, b.operatingSystem))} - name := strings.ReplaceAll(unit, "-", "_") // conform with index naming policy + name := strings.ReplaceAll(strings.ReplaceAll(binaryName, "-", "_"), "/", "_") // conform with index naming policy if isSupportedBeatsBinary(binaryName) { beatsStreams = 
append(beatsStreams, map[string]interface{}{ @@ -572,7 +604,7 @@ func (b *BeatsMonitor) injectMetricsInput(cfg map[string]interface{}, componentI "metricsets": []interface{}{"stats", "state"}, "hosts": endpoints, "period": "10s", - "index": fmt.Sprintf("metrics-elastic_agent.%s-%s", fixedAgentName, monitoringNamespace), + "index": fmt.Sprintf("metrics-elastic_agent.%s-%s", name, monitoringNamespace), "processors": []interface{}{ map[string]interface{}{ "add_fields": map[string]interface{}{ @@ -723,12 +755,18 @@ func loggingPath(id, operatingSystem string) string { } func endpointPath(id, operatingSystem string) (endpointPath string) { - id = strings.ReplaceAll(id, string(filepath.Separator), "-") + return endpointPathWithDir(id, operatingSystem, paths.TempDir(), string(filepath.Separator)) +} + +func endpointPathWithDir(id, operatingSystem, tempDir, separator string) (endpointPath string) { + id = strings.ReplaceAll(id, separator, "-") if operatingSystem == windowsOS { + // on windows named pipe `/` separates pipe name from a computer/server name + id = strings.ReplaceAll(id, "/", "-") return fmt.Sprintf(mbEndpointFileFormatWin, id) } // unix socket path must be less than 104 characters - path := fmt.Sprintf("unix://%s.sock", filepath.Join(paths.TempDir(), id)) + path := fmt.Sprintf("unix://%s.sock", filepath.Join(tempDir, id)) if len(path) < 104 { return path } @@ -853,8 +891,8 @@ func httpCopyRules() []interface{} { return fromToMap } -func isSupportedBinary(binaryName string) bool { - for _, supportedBinary := range supportedComponents { +func isSupportedMetricsBinary(binaryName string) bool { + for _, supportedBinary := range supportedMetricsComponents { if strings.EqualFold(supportedBinary, binaryName) { return true } diff --git a/internal/pkg/agent/application/monitoring/v1_monitor_test.go b/internal/pkg/agent/application/monitoring/v1_monitor_test.go new file mode 100644 index 00000000000..1729b2224ac --- /dev/null +++ b/internal/pkg/agent/application/monitoring/v1_monitor_test.go @@ -0,0 +1,54 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package monitoring + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestEndpointPath(t *testing.T) { + sep := string(filepath.Separator) + testCases := []struct { + Name string + OS string + ID string + ExpectedID string + }{ + // using filepath join so windows runner is happy, filepath is used internally. 
+ // simple + {"simple linux", "linux", "simple", "unix://" + sep + filepath.Join("tmp", "elastic-agent", "simple.sock")}, + {"simple darwin", "darwin", "simple", "unix://" + sep + filepath.Join("tmp", "elastic-agent", "simple.sock")}, + {"simple windows", "windows", "simple", "npipe:///simple"}, + + // special chars + {"simple linux", "linux", "complex43@#$", "unix://" + sep + filepath.Join("tmp", "elastic-agent", "complex43@#$.sock")}, + {"simple darwin", "darwin", "complex43@#$", "unix://" + sep + filepath.Join("tmp", "elastic-agent", "complex43@#$.sock")}, + {"simple windows", "windows", "complex43@#$", "npipe:///complex43@#$"}, + + // slash + {"simple linux", "linux", "slash/sample", "unix://" + sep + filepath.Join("tmp", "elastic-agent", "slash-sample.sock")}, + {"simple darwin", "darwin", "slash/sample", "unix://" + sep + filepath.Join("tmp", "elastic-agent", "slash-sample.sock")}, + {"simple windows", "windows", "slash/sample", "npipe:///slash-sample"}, + + // backslash + {"simple linux", "linux", "back\\slash", "unix://" + sep + filepath.Join("tmp", "elastic-agent", "back\\slash.sock")}, + {"simple darwin", "darwin", "back\\slash", "unix://" + sep + filepath.Join("tmp", "elastic-agent", "back\\slash.sock")}, + {"simple windows", "windows", "back\\slash", "npipe:///back-slash"}, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + separator := "/" + if tc.OS == windowsOS { + separator = "\\" + } + endpointPath := endpointPathWithDir(tc.ID, tc.OS, "/tmp/elastic-agent", separator) + require.Equal(t, tc.ExpectedID, endpointPath) + }) + } +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go index 2a09c65e522..f36dee46eb9 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go @@ -130,12 +130,18 @@ func snapshotURI(versionOverride string, config *artifact.Config) (string, error return "", fmt.Errorf("uri is not a string") } + // Because we're iterating over a map from the API response, + // the order is random and some elements there do not contain the + // `/beats/elastic-agent/` substring, so we need to go through the + // whole map before returning an error. 
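A small sketch of the base-URI extraction described in the comment above; only URIs containing "/beats/elastic-agent/" yield a base, everything else is now skipped instead of triggering an error (the URL below is illustrative):

    uri := "https://snapshots.elastic.co/8.7.0-d050210c/downloads/beats/elastic-agent/elastic-agent-8.7.0-SNAPSHOT-linux-x86_64.tar.gz"
    if i := strings.Index(uri, "/beats/elastic-agent/"); i != -1 {
        // base URI handed to the artifact downloader
        fmt.Println(uri[:i]) // https://snapshots.elastic.co/8.7.0-d050210c/downloads
    }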
+ // + // One of the elements that might be there and do not contain this + // substring is the `elastic-agent-shipper`, whose URL is something like: + // https://snapshots.elastic.co/8.7.0-d050210c/downloads/elastic-agent-shipper/elastic-agent-shipper-8.7.0-SNAPSHOT-linux-x86_64.tar.gz index := strings.Index(uri, "/beats/elastic-agent/") - if index == -1 { - return "", fmt.Errorf("not an agent uri: '%s'", uri) + if index != -1 { + return uri[:index], nil } - - return uri[:index], nil } return "", fmt.Errorf("uri not detected") diff --git a/internal/pkg/agent/application/upgrade/error_checker.go b/internal/pkg/agent/application/upgrade/error_checker.go index 8e308c4e080..a393be1cb2a 100644 --- a/internal/pkg/agent/application/upgrade/error_checker.go +++ b/internal/pkg/agent/application/upgrade/error_checker.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/elastic/elastic-agent/internal/pkg/agent/control" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/client" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/pkg/core/logger" ) diff --git a/internal/pkg/agent/application/upgrade/rollback.go b/internal/pkg/agent/application/upgrade/rollback.go index b4f6014fb3d..19872de2a4c 100644 --- a/internal/pkg/agent/application/upgrade/rollback.go +++ b/internal/pkg/agent/application/upgrade/rollback.go @@ -16,7 +16,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/control" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/client" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/install" "github.com/elastic/elastic-agent/internal/pkg/core/backoff" @@ -114,7 +114,7 @@ func InvokeWatcher(log *logger.Logger) error { defer func() { if cmd.Process != nil { log.Debugf("releasing watcher %v", cmd.Process.Pid) - cmd.Process.Release() + _ = cmd.Process.Release() } }() diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index e4ef8c6066f..3fe834df3d2 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -195,12 +195,17 @@ func (u *Upgrader) Ack(ctx context.Context, acker acker.Acker) error { return nil } - if err := acker.Ack(ctx, marker.Action); err != nil { - return err - } + // Action can be nil if the upgrade was called locally. 
+ // Should handle gracefully + // https://github.com/elastic/elastic-agent/issues/1788 + if marker.Action != nil { + if err := acker.Ack(ctx, marker.Action); err != nil { + return err + } - if err := acker.Commit(ctx); err != nil { - return err + if err := acker.Commit(ctx); err != nil { + return err + } } marker.Acked = true diff --git a/internal/pkg/agent/cmd/common.go b/internal/pkg/agent/cmd/common.go index 7639b345ff7..dcdf800683f 100644 --- a/internal/pkg/agent/cmd/common.go +++ b/internal/pkg/agent/cmd/common.go @@ -35,6 +35,9 @@ func NewCommand() *cobra.Command { func NewCommandWithArgs(args []string, streams *cli.IOStreams) *cobra.Command { cmd := &cobra.Command{ Use: "elastic-agent [subcommand]", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + return tryContainerLoadPaths() + }, } // path flags diff --git a/internal/pkg/agent/cmd/container.go b/internal/pkg/agent/cmd/container.go index 91c755bedfc..2e14bfec3e7 100644 --- a/internal/pkg/agent/cmd/container.go +++ b/internal/pkg/agent/cmd/container.go @@ -208,7 +208,7 @@ func containerCmd(streams *cli.IOStreams) error { if err != nil { return errors.New(err, "finding current process") } - if apmProc, err = runLegacyAPMServer(streams, apmPath); err != nil { + if apmProc, err = runLegacyAPMServer(streams); err != nil { return errors.New(err, "starting legacy apm-server") } wg.Add(1) // apm-server legacy process @@ -701,8 +701,8 @@ func truncateString(b []byte) string { // runLegacyAPMServer extracts the bundled apm-server from elastic-agent // to path and runs it with args. -func runLegacyAPMServer(streams *cli.IOStreams, path string) (*process.Info, error) { - name := "apm-server" +func runLegacyAPMServer(streams *cli.IOStreams) (*process.Info, error) { + name := "apm" logInfo(streams, "Preparing apm-server for legacy mode.") platform, err := component.LoadPlatformDetail(isContainer) @@ -720,15 +720,6 @@ func runLegacyAPMServer(streams *cli.IOStreams, path string) (*process.Info, err return nil, fmt.Errorf("failed to detect apm-server input: %w", err) } - // Get the apm-server directory - files, err := os.ReadDir(path) - if err != nil { - return nil, errors.New(err, fmt.Sprintf("reading directory %s", path)) - } - if len(files) != 1 || !files[0].IsDir() { - return nil, errors.New("expected one directory") - } - // add APM Server specific configuration var args []string addEnv := func(arg, env string) { @@ -748,8 +739,16 @@ func runLegacyAPMServer(streams *cli.IOStreams, path string) (*process.Info, err addEnv("--path.logs", "LOGS_PATH") addEnv("--httpprof", "HTTPPROF") addSettingEnv("gc_percent", "APMSERVER_GOGC") - logInfo(streams, "Starting legacy apm-server daemon as a subprocess.") - return process.Start(spec.BinaryPath, process.WithArgs(args)) + logInfo(streams, "Starting legacy apm-server daemon as a subprocess."+spec.BinaryPath) + options := []process.StartOption{process.WithArgs(args)} + wdir := filepath.Dir(spec.BinaryPath) + if wdir != "." { + options = append(options, process.WithCmdOptions(func(c *exec.Cmd) error { + c.Dir = wdir + return nil + })) + } + return process.Start(spec.BinaryPath, options...) 
} func logToStderr(cfg *configuration.Configuration) { diff --git a/internal/pkg/agent/cmd/diagnostics.go b/internal/pkg/agent/cmd/diagnostics.go index 9fab842375e..2b07537a920 100644 --- a/internal/pkg/agent/cmd/diagnostics.go +++ b/internal/pkg/agent/cmd/diagnostics.go @@ -13,18 +13,24 @@ import ( "io/fs" "os" "path/filepath" + "reflect" "strings" "time" "github.com/hashicorp/go-multierror" "github.com/spf13/cobra" + "gopkg.in/yaml.v2" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/client" "github.com/elastic/elastic-agent/internal/pkg/cli" "github.com/elastic/elastic-agent/pkg/component" ) +const ( + REDACTED = "" +) + func newDiagnosticsCommand(_ []string, streams *cli.IOStreams) *cobra.Command { cmd := &cobra.Command{ Use: "diagnostics", @@ -50,15 +56,10 @@ func diagnosticCmd(streams *cli.IOStreams, cmd *cobra.Command) error { fileName = "elastic-agent-diagnostics-" + ts.Format("2006-01-02T15-04-05Z07-00") + ".zip" // RFC3339 format that replaces : with -, so it will work on Windows } - err := tryContainerLoadPaths() - if err != nil { - return err - } - ctx := handleSignal(context.Background()) daemon := client.New() - err = daemon.Connect(ctx) + err := daemon.Connect(ctx) if err != nil { return fmt.Errorf("failed to connect to daemon: %w", err) } @@ -74,7 +75,7 @@ func diagnosticCmd(streams *cli.IOStreams, cmd *cobra.Command) error { return fmt.Errorf("failed to fetch component/unit diagnostics: %w", err) } - err = createZip(fileName, agentDiag, unitDiags) + err = createZip(streams, fileName, agentDiag, unitDiags) if err != nil { return fmt.Errorf("unable to create archive %q: %w", fileName, err) } @@ -85,9 +86,9 @@ func diagnosticCmd(streams *cli.IOStreams, cmd *cobra.Command) error { // createZip creates a zip archive with the passed fileName. // -// The passed DiagnosticsInfo and AgentConfig data is written in the specified output format. +// The passed DiagnosticsInfo and AgentConfig data is written in the format supplied by the unit. // Any local log files are collected and copied into the archive. 
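A minimal sketch of how a single diagnostic entry lands in the archive using the components/<dir>/<unit>/<file> layout that createZip uses; names and error handling are illustrative:

    zw := zip.NewWriter(f) // f is the *os.File created for the archive
    w, err := zw.Create("components/comp-id/unit-id/state.yaml")
    if err != nil {
        return err
    }
    if _, err := w.Write(content); err != nil { // content is the (redacted) diagnostic payload
        return err
    }
    return zw.Close()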
-func createZip(fileName string, agentDiag []client.DiagnosticFileResult, unitDiags []client.DiagnosticUnitResult) error { +func createZip(streams *cli.IOStreams, fileName string, agentDiag []client.DiagnosticFileResult, unitDiags []client.DiagnosticUnitResult) error { f, err := os.Create(fileName) if err != nil { return err @@ -100,7 +101,7 @@ func createZip(fileName string, agentDiag []client.DiagnosticFileResult, unitDia if err != nil { return closeHandlers(err, zw, f) } - _, err = zf.Write(ad.Content) + err = writeRedacted(streams, ad.Filename, ad, zf) if err != nil { return closeHandlers(err, zw, f) } @@ -142,11 +143,12 @@ func createZip(fileName string, agentDiag []client.DiagnosticFileResult, unitDia continue } for _, fr := range ud.Results { - w, err := zw.Create(fmt.Sprintf("components/%s/%s/%s", dirName, unitDir, fr.Name)) + fullFilePath := fmt.Sprintf("components/%s/%s/%s", dirName, unitDir, fr.Filename) + zf, err := zw.Create(fullFilePath) if err != nil { return closeHandlers(err, zw, f) } - _, err = w.Write(fr.Content) + err = writeRedacted(streams, fullFilePath, fr, zf) if err != nil { return closeHandlers(err, zw, f) } @@ -161,6 +163,71 @@ func createZip(fileName string, agentDiag []client.DiagnosticFileResult, unitDia return closeHandlers(nil, zw, f) } +func writeRedacted(streams *cli.IOStreams, fullFilePath string, fr client.DiagnosticFileResult, w io.Writer) error { + out := &fr.Content + + // Should we support json too? + if fr.ContentType == "application/yaml" { + unmarshalled := map[string]interface{}{} + err := yaml.Unmarshal(fr.Content, &unmarshalled) + if err != nil { + // Best effort, output a warning but still include the file + fmt.Fprintf(streams.Err, "[WARNING] Could not redact %s due to unmarshalling error: %s\n", fullFilePath, err) + } else { + redacted, err := yaml.Marshal(redactMap(unmarshalled)) + if err != nil { + // Best effort, output a warning but still include the file + fmt.Fprintf(streams.Err, "[WARNING] Could not redact %s due to marshalling error: %s\n", fullFilePath, err) + } else { + out = &redacted + } + } + } + + _, err := w.Write(*out) + return err +} + +func redactMap(m map[string]interface{}) map[string]interface{} { + for k, v := range m { + if v != nil && reflect.TypeOf(v).Kind() == reflect.Map { + v = redactMap(toMapStr(v)) + } + if redactKey(k) { + v = REDACTED + } + m[k] = v + } + return m +} + +func toMapStr(v interface{}) map[string]interface{} { + mm := map[string]interface{}{} + m, ok := v.(map[interface{}]interface{}) + if !ok { + return mm + } + + for k, v := range m { + mm[k.(string)] = v + } + return mm +} + +func redactKey(k string) bool { + // "routekey" shouldn't be redacted. + // Add any other exceptions here. 
+ if k == "routekey" { + return false + } + + return strings.Contains(k, "certificate") || + strings.Contains(k, "passphrase") || + strings.Contains(k, "password") || + strings.Contains(k, "token") || + strings.Contains(k, "key") +} + // zipLogs walks paths.Logs() and copies the file structure into zw in "logs/" func zipLogs(zw *zip.Writer) error { _, err := zw.Create("logs/") diff --git a/internal/pkg/agent/cmd/enroll_cmd.go b/internal/pkg/agent/cmd/enroll_cmd.go index 805b8a47757..cdd72f1ce58 100644 --- a/internal/pkg/agent/cmd/enroll_cmd.go +++ b/internal/pkg/agent/cmd/enroll_cmd.go @@ -26,7 +26,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/client" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/install" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" @@ -230,6 +230,8 @@ func (c *enrollCmd) Execute(ctx context.Context, streams *cli.IOStreams) error { if localFleetServer { // Ensure that the agent does not use a proxy configuration // when connecting to the local fleet server. + // Note that when running fleet-server the enroll request will be sent to :8220, + // however when the agent is running afterwards requests will be sent to :8221 c.remoteConfig.Transport.Proxy.Disable = true } @@ -301,6 +303,9 @@ func (c *enrollCmd) writeDelayEnroll(streams *cli.IOStreams) error { func (c *enrollCmd) fleetServerBootstrap(ctx context.Context, persistentConfig map[string]interface{}) (string, error) { c.log.Debug("verifying communication with running Elastic Agent daemon") agentRunning := true + if c.options.FleetServer.InternalPort == 0 { + c.options.FleetServer.InternalPort = defaultFleetServerInternalPort + } _, err := getDaemonState(ctx) if err != nil { if !c.options.FleetServer.SpawnAgent { @@ -336,6 +341,7 @@ func (c *enrollCmd) fleetServerBootstrap(ctx context.Context, persistentConfig m if err != nil { return "", err } + c.options.FleetServer.InternalPort = fleetConfig.Server.InternalPort configToStore := map[string]interface{}{ "agent": agentConfig, @@ -545,6 +551,9 @@ func (c *enrollCmd) enroll(ctx context.Context, persistentConfig map[string]inte // use internal URL for future requests if c.options.InternalURL != "" { fleetConfig.Client.Host = c.options.InternalURL + // fleet-server will bind the internal listenter to localhost:8221 + // InternalURL is localhost:8221, however cert uses $HOSTNAME, so we need to disable hostname verification. 
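A small usage sketch of the redaction helpers above; the input document is made up, and the replacement value comes from the REDACTED constant:

    raw := []byte("output:\n  password: hunter2\n  api_key: abc123\n  routekey: r1\n")
    unmarshalled := map[string]interface{}{}
    if err := yaml.Unmarshal(raw, &unmarshalled); err == nil {
        // "password" and "api_key" are replaced with the REDACTED placeholder,
        // "routekey" is kept because of the explicit exception in redactKey.
        redacted, _ := yaml.Marshal(redactMap(unmarshalled))
        fmt.Println(string(redacted))
    }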
+ fleetConfig.Client.Transport.TLS.VerificationMode = tlscommon.VerifyCertificate } } diff --git a/internal/pkg/agent/cmd/inspect.go b/internal/pkg/agent/cmd/inspect.go index d933a8fe1bf..bfefd7a140b 100644 --- a/internal/pkg/agent/cmd/inspect.go +++ b/internal/pkg/agent/cmd/inspect.go @@ -126,11 +126,6 @@ type inspectConfigOpts struct { } func inspectConfig(ctx context.Context, cfgPath string, opts inspectConfigOpts, streams *cli.IOStreams) error { - err := tryContainerLoadPaths() - if err != nil { - return err - } - l, err := newErrorLogger() if err != nil { return err @@ -144,7 +139,7 @@ func inspectConfig(ctx context.Context, cfgPath string, opts inspectConfigOpts, if !opts.variables && !opts.includeMonitoring { return printConfig(fullCfg, l, streams) } - cfg, err := getConfigWithVariables(ctx, l, cfgPath, opts.variablesWait) + cfg, lvl, err := getConfigWithVariables(ctx, l, cfgPath, opts.variablesWait) if err != nil { return err } @@ -165,11 +160,11 @@ func inspectConfig(ctx context.Context, cfgPath string, opts inspectConfigOpts, if err != nil { return fmt.Errorf("failed to get monitoring: %w", err) } - _, binaryMapping, err := specs.PolicyToComponents(cfg) + components, binaryMapping, err := specs.PolicyToComponents(cfg, lvl) if err != nil { return fmt.Errorf("failed to get binary mappings: %w", err) } - monitorCfg, err := monitorFn(cfg, binaryMapping) + monitorCfg, err := monitorFn(cfg, components, binaryMapping) if err != nil { return fmt.Errorf("failed to get monitoring config: %w", err) } @@ -236,12 +231,6 @@ func inspectComponents(ctx context.Context, cfgPath string, opts inspectComponen return err } - // Ensure that when running inside a container that the correct paths are used. - err = tryContainerLoadPaths() - if err != nil { - return err - } - // Load the requirements before trying to load the configuration. These should always load // even if the configuration is wrong. platform, err := component.LoadPlatformDetail() @@ -253,7 +242,7 @@ func inspectComponents(ctx context.Context, cfgPath string, opts inspectComponen return fmt.Errorf("failed to detect inputs and outputs: %w", err) } - m, err := getConfigWithVariables(ctx, l, cfgPath, opts.variablesWait) + m, lvl, err := getConfigWithVariables(ctx, l, cfgPath, opts.variablesWait) if err != nil { return err } @@ -264,7 +253,7 @@ func inspectComponents(ctx context.Context, cfgPath string, opts inspectComponen } // Compute the components from the computed configuration. 
- comps, err := specs.ToComponents(m, monitorFn) + comps, err := specs.ToComponents(m, monitorFn, lvl) if err != nil { return fmt.Errorf("failed to render components: %w", err) } @@ -344,39 +333,43 @@ func getMonitoringFn(cfg map[string]interface{}) (component.GenerateMonitoringCf return monitor.MonitoringConfig, nil } -func getConfigWithVariables(ctx context.Context, l *logger.Logger, cfgPath string, timeout time.Duration) (map[string]interface{}, error) { +func getConfigWithVariables(ctx context.Context, l *logger.Logger, cfgPath string, timeout time.Duration) (map[string]interface{}, logp.Level, error) { caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), l) if err != nil { - return nil, fmt.Errorf("failed to determine capabilities: %w", err) + return nil, logp.InfoLevel, fmt.Errorf("failed to determine capabilities: %w", err) } cfg, err := operations.LoadFullAgentConfig(l, cfgPath, true) if err != nil { - return nil, err + return nil, logp.InfoLevel, err + } + lvl, err := getLogLevel(cfg, cfgPath) + if err != nil { + return nil, logp.InfoLevel, err } m, err := cfg.ToMapStr() if err != nil { - return nil, err + return nil, lvl, err } ast, err := transpiler.NewAST(m) if err != nil { - return nil, fmt.Errorf("could not create the AST from the configuration: %w", err) + return nil, lvl, fmt.Errorf("could not create the AST from the configuration: %w", err) } var ok bool updatedAst, err := caps.Apply(ast) if err != nil { - return nil, fmt.Errorf("failed to apply capabilities: %w", err) + return nil, lvl, fmt.Errorf("failed to apply capabilities: %w", err) } ast, ok = updatedAst.(*transpiler.AST) if !ok { - return nil, fmt.Errorf("failed to transform object returned from capabilities to AST: %w", err) + return nil, lvl, fmt.Errorf("failed to transform object returned from capabilities to AST: %w", err) } // Wait for the variables based on the timeout. vars, err := vars.WaitForVariables(ctx, l, cfg, timeout) if err != nil { - return nil, fmt.Errorf("failed to gather variables: %w", err) + return nil, lvl, fmt.Errorf("failed to gather variables: %w", err) } // Render the inputs using the discovered inputs. 
@@ -384,18 +377,32 @@ func getConfigWithVariables(ctx context.Context, l *logger.Logger, cfgPath strin if ok { renderedInputs, err := transpiler.RenderInputs(inputs, vars) if err != nil { - return nil, fmt.Errorf("rendering inputs failed: %w", err) + return nil, lvl, fmt.Errorf("rendering inputs failed: %w", err) } err = transpiler.Insert(ast, renderedInputs, "inputs") if err != nil { - return nil, fmt.Errorf("inserting rendered inputs failed: %w", err) + return nil, lvl, fmt.Errorf("inserting rendered inputs failed: %w", err) } } m, err = ast.Map() if err != nil { - return nil, fmt.Errorf("failed to convert ast to map[string]interface{}: %w", err) + return nil, lvl, fmt.Errorf("failed to convert ast to map[string]interface{}: %w", err) + } + return m, lvl, nil +} + +func getLogLevel(rawCfg *config.Config, cfgPath string) (logp.Level, error) { + cfg, err := configuration.NewFromConfig(rawCfg) + if err != nil { + return logger.DefaultLogLevel, errors.New(err, + fmt.Sprintf("could not parse configuration file %s", cfgPath), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, cfgPath)) + } + if cfg.Settings.LoggingConfig != nil { + return cfg.Settings.LoggingConfig.Level, nil } - return m, nil + return logger.DefaultLogLevel, nil } func printComponents(components []component.Component, streams *cli.IOStreams) error { diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go index e6f9ec8d0f7..d87d004c230 100644 --- a/internal/pkg/agent/cmd/run.go +++ b/internal/pkg/agent/cmd/run.go @@ -14,6 +14,8 @@ import ( "path/filepath" "syscall" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/spf13/cobra" "go.elastic.co/apm" apmtransport "go.elastic.co/apm/transport" @@ -25,6 +27,7 @@ import ( "github.com/elastic/elastic-agent-libs/service" "github.com/elastic/elastic-agent-system-metrics/report" "github.com/elastic/elastic-agent/internal/pkg/agent/application" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/filelock" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/monitoring" @@ -33,7 +36,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/server" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/server" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/cli" @@ -59,6 +62,10 @@ func newRunCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command { Run: func(_ *cobra.Command, _ []string) { if err := run(nil); err != nil && !errors.Is(err, context.Canceled) { fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage()) + + // TODO: remove it. os.Exit will be called on main and if it's called + // too early some goroutines with deferred functions related + // to the shutdown process might not run. 
os.Exit(1) } }, @@ -98,15 +105,19 @@ func run(override cfgOverrider, modifiers ...component.PlatformModifier) error { return err } - logger, err := logger.NewFromConfig("", cfg.Settings.LoggingConfig, true) + logLvl := logger.DefaultLogLevel + if cfg.Settings.LoggingConfig != nil { + logLvl = cfg.Settings.LoggingConfig.Level + } + l, err := logger.NewFromConfig("", cfg.Settings.LoggingConfig, true) if err != nil { return err } - cfg, err = tryDelayEnroll(ctx, logger, cfg, override) + cfg, err = tryDelayEnroll(ctx, l, cfg, override) if err != nil { err = errors.New(err, "failed to perform delayed enrollment") - logger.Error(err) + l.Error(err) return err } pathConfigFile := paths.AgentConfigFile() @@ -134,21 +145,33 @@ func run(override cfgOverrider, modifiers ...component.PlatformModifier) error { errors.M(errors.MetaKeyPath, pathConfigFile)) } + // Ensure that the log level now matches what is configured in the agentInfo. + if agentInfo.LogLevel() != "" { + var lvl logp.Level + err = lvl.Unpack(agentInfo.LogLevel()) + if err != nil { + l.Error(errors.New(err, "failed to parse agent information log level")) + } else { + logLvl = lvl + logger.SetLevel(lvl) + } + } + // initiate agent watcher - if err := upgrade.InvokeWatcher(logger); err != nil { + if err := upgrade.InvokeWatcher(l); err != nil { // we should not fail because watcher is not working - logger.Error(errors.New(err, "failed to invoke rollback watcher")) + l.Error(errors.New(err, "failed to invoke rollback watcher")) } if allowEmptyPgp, _ := release.PGP(); allowEmptyPgp { - logger.Info("Elastic Agent has been built with security disabled. Elastic Agent will not verify signatures of upgrade artifact.") + l.Info("Elastic Agent has been built with security disabled. Elastic Agent will not verify signatures of upgrade artifact.") } execPath, err := reexecPath() if err != nil { return err } - rexLogger := logger.Named("reexec") + rexLogger := l.Named("reexec") rex := reexec.NewManager(rexLogger, execPath) tracer, err := initTracer(agentName, release.Version(), cfg.Settings.MonitoringConfig) @@ -156,21 +179,21 @@ func run(override cfgOverrider, modifiers ...component.PlatformModifier) error { return fmt.Errorf("could not initiate APM tracer: %w", err) } if tracer != nil { - logger.Info("APM instrumentation enabled") + l.Info("APM instrumentation enabled") defer func() { tracer.Flush(nil) tracer.Close() }() } else { - logger.Info("APM instrumentation disabled") + l.Info("APM instrumentation disabled") } - coord, err := application.New(logger, agentInfo, rex, tracer, modifiers...) + coord, err := application.New(l, logLvl, agentInfo, rex, tracer, configuration.IsFleetServerBootstrap(cfg.Fleet), modifiers...) if err != nil { return err } - serverStopFn, err := setupMetrics(logger, cfg.Settings.DownloadConfig.OS(), cfg.Settings.MonitoringConfig, tracer) + serverStopFn, err := setupMetrics(l, cfg.Settings.DownloadConfig.OS(), cfg.Settings.MonitoringConfig, tracer, coord) if err != nil { return err } @@ -180,7 +203,7 @@ func run(override cfgOverrider, modifiers ...component.PlatformModifier) error { diagHooks := diagnostics.GlobalHooks() diagHooks = append(diagHooks, coord.DiagnosticHooks()...) 
- control := server.New(logger.Named("control"), agentInfo, coord, tracer, diagHooks) + control := server.New(l.Named("control"), agentInfo, coord, tracer, diagHooks, cfg.Settings.GRPC) // start the control listener if err := control.Start(); err != nil { return err @@ -204,15 +227,19 @@ LOOP: for { select { case <-stop: + l.Info("service.HandleSignals invoked stop function. Shutting down") break LOOP case <-appDone: + l.Info("application done, coordinator exited") logShutdown = false break LOOP case <-rex.ShutdownChan(): + l.Info("reexec shutdown channel triggered") isRex = true logShutdown = false break LOOP case sig := <-signals: + l.Infof("signal %q received", sig) if sig == syscall.SIGHUP { rexLogger.Infof("SIGHUP triggered re-exec") isRex = true @@ -224,13 +251,13 @@ LOOP: } if logShutdown { - logger.Info("Shutting down Elastic Agent and sending last events...") + l.Info("Shutting down Elastic Agent and sending last events...") } cancel() err = <-appErr if logShutdown { - logger.Info("Shutting down completed.") + l.Info("Shutting down completed.") } if isRex { rex.ShutdownComplete() @@ -395,7 +422,7 @@ func initTracer(agentName, version string, mcfg *monitoringCfg.MonitoringConfig) cfg := mcfg.APM - // nolint:godox // the TODO is intentional + //nolint:godox // the TODO is intentional // TODO(stn): Ideally, we'd use apmtransport.NewHTTPTransportOptions() // but it doesn't exist today. Update this code once we have something // available via the APM Go agent. @@ -452,6 +479,7 @@ func setupMetrics( operatingSystem string, cfg *monitoringCfg.MonitoringConfig, tracer *apm.Tracer, + coord *coordinator.Coordinator, ) (func() error, error) { if err := report.SetupMetrics(logger, agentName, version.GetDefaultVersion()); err != nil { return nil, err @@ -463,7 +491,7 @@ func setupMetrics( Host: monitoring.AgentMonitoringEndpoint(operatingSystem, cfg), } - s, err := monitoring.NewServer(logger, endpointConfig, monitoringLib.GetNamespace, tracer) + s, err := monitoring.NewServer(logger, endpointConfig, monitoringLib.GetNamespace, tracer, coord, isProcessStatsEnabled(cfg), operatingSystem) if err != nil { return nil, errors.New(err, "could not start the HTTP server for the API") } @@ -472,3 +500,7 @@ func setupMetrics( // return server stopper return s.Stop, nil } + +func isProcessStatsEnabled(cfg *monitoringCfg.MonitoringConfig) bool { + return cfg != nil && cfg.HTTP.Enabled +} diff --git a/internal/pkg/agent/cmd/status.go b/internal/pkg/agent/cmd/status.go index 2f748e6dc89..71348e2dc47 100644 --- a/internal/pkg/agent/cmd/status.go +++ b/internal/pkg/agent/cmd/status.go @@ -17,7 +17,7 @@ import ( "github.com/spf13/cobra" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/client" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/cli" ) @@ -49,11 +49,6 @@ func newStatusCommand(_ []string, streams *cli.IOStreams) *cobra.Command { } func statusCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error { - err := tryContainerLoadPaths() - if err != nil { - return err - } - output, _ := cmd.Flags().GetString("output") outputFunc, ok := statusOutputs[output] if !ok { diff --git a/internal/pkg/agent/cmd/upgrade.go b/internal/pkg/agent/cmd/upgrade.go index 5e5d75aeeba..7165e36a118 100644 --- a/internal/pkg/agent/cmd/upgrade.go +++ b/internal/pkg/agent/cmd/upgrade.go @@ -11,8 +11,8 @@ import ( "github.com/spf13/cobra" - 
"github.com/elastic/elastic-agent/internal/pkg/agent/control" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" + control "github.com/elastic/elastic-agent/internal/pkg/agent/control" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/client" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/cli" ) diff --git a/internal/pkg/agent/configuration/grpc.go b/internal/pkg/agent/configuration/grpc.go index 6624e6a0c08..28210edb979 100644 --- a/internal/pkg/agent/configuration/grpc.go +++ b/internal/pkg/agent/configuration/grpc.go @@ -8,15 +8,17 @@ import "fmt" // GRPCConfig is a configuration of GRPC server. type GRPCConfig struct { - Address string `config:"address"` - Port uint16 `config:"port"` + Address string `config:"address"` + Port uint16 `config:"port"` + MaxMsgSize int `config:"max_message_size"` } // DefaultGRPCConfig creates a default server configuration. func DefaultGRPCConfig() *GRPCConfig { return &GRPCConfig{ - Address: "localhost", - Port: 6789, + Address: "localhost", + Port: 6789, + MaxMsgSize: 1024 * 1024 * 100, // grpc default 4MB is unsufficient for diagnostics } } diff --git a/internal/pkg/agent/control/v1/client/client.go b/internal/pkg/agent/control/v1/client/client.go new file mode 100644 index 00000000000..529f3bd6247 --- /dev/null +++ b/internal/pkg/agent/control/v1/client/client.go @@ -0,0 +1,186 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package client + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "time" + + "github.com/elastic/elastic-agent/internal/pkg/agent/control" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v1/proto" +) + +// Status is the status of the Elastic Agent +type Status = proto.Status + +const ( + // Starting is when the it is still starting. + Starting Status = proto.Status_V1_STARTING + // Configuring is when it is configuring. + Configuring Status = proto.Status_V1_CONFIGURING + // Healthy is when it is healthy. + Healthy Status = proto.Status_V1_HEALTHY + // Degraded is when it is degraded. + Degraded Status = proto.Status_V1_DEGRADED + // Failed is when it is failed. + Failed Status = proto.Status_V1_FAILED + // Stopping is when it is stopping. + Stopping Status = proto.Status_V1_STOPPING + // Upgrading is when it is upgrading. + Upgrading Status = proto.Status_V1_UPGRADING +) + +// Version is the current running version of the daemon. +type Version struct { + Version string + Commit string + BuildTime time.Time + Snapshot bool +} + +// ApplicationStatus is a status of an application managed by the Elastic Agent. +// TODO(Anderson): Implement sort.Interface and sort it. +type ApplicationStatus struct { + ID string + Name string + Status Status + Message string + Payload map[string]interface{} +} + +// AgentStatus is the current status of the Elastic Agent. +type AgentStatus struct { + Status Status + Message string + Applications []*ApplicationStatus +} + +// Client communicates to Elastic Agent through the control protocol. +type Client interface { + // Connect connects to the running Elastic Agent. + Connect(ctx context.Context) error + // Disconnect disconnects from the running Elastic Agent. + Disconnect() + // Version returns the current version of the running agent. 
+ Version(ctx context.Context) (Version, error) + // Status returns the current status of the running agent. + Status(ctx context.Context) (*AgentStatus, error) + // Restart triggers restarting the current running daemon. + Restart(ctx context.Context) error + // Upgrade triggers upgrade of the current running daemon. + Upgrade(ctx context.Context, version string, sourceURI string) (string, error) +} + +// client manages the state and communication to the Elastic Agent. +type client struct { + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + client proto.ElasticAgentControlClient +} + +// New creates a client connection to Elastic Agent. +func New() Client { + return &client{} +} + +// Connect connects to the running Elastic Agent. +func (c *client) Connect(ctx context.Context) error { + c.ctx, c.cancel = context.WithCancel(ctx) + conn, err := dialContext(ctx) + if err != nil { + return err + } + c.client = proto.NewElasticAgentControlClient(conn) + return nil +} + +// Disconnect disconnects from the running Elastic Agent. +func (c *client) Disconnect() { + if c.cancel != nil { + c.cancel() + c.wg.Wait() + c.ctx = nil + c.cancel = nil + } +} + +// Version returns the current version of the running agent. +func (c *client) Version(ctx context.Context) (Version, error) { + res, err := c.client.Version(ctx, &proto.Empty{}) + if err != nil { + return Version{}, err + } + bt, err := time.Parse(control.TimeFormat(), res.BuildTime) + if err != nil { + return Version{}, err + } + return Version{ + Version: res.Version, + Commit: res.Commit, + BuildTime: bt, + Snapshot: res.Snapshot, + }, nil +} + +// Status returns the current status of the running agent. +func (c *client) Status(ctx context.Context) (*AgentStatus, error) { + res, err := c.client.Status(ctx, &proto.Empty{}) + if err != nil { + return nil, err + } + s := &AgentStatus{ + Status: res.Status, + Message: res.Message, + Applications: make([]*ApplicationStatus, len(res.Applications)), + } + for i, appRes := range res.Applications { + var payload map[string]interface{} + if appRes.Payload != "" { + err := json.Unmarshal([]byte(appRes.Payload), &payload) + if err != nil { + return nil, err + } + } + s.Applications[i] = &ApplicationStatus{ + ID: appRes.Id, + Name: appRes.Name, + Status: appRes.Status, + Message: appRes.Message, + Payload: payload, + } + } + return s, nil +} + +// Restart triggers restarting the current running daemon. +func (c *client) Restart(ctx context.Context) error { + res, err := c.client.Restart(ctx, &proto.Empty{}) + if err != nil { + return err + } + if res.Status == proto.ActionStatus_V1_FAILURE { + return fmt.Errorf(res.Error) + } + return nil +} + +// Upgrade triggers upgrade of the current running daemon. 
+func (c *client) Upgrade(ctx context.Context, version string, sourceURI string) (string, error) { + res, err := c.client.Upgrade(ctx, &proto.UpgradeRequest{ + Version: version, + SourceURI: sourceURI, + }) + if err != nil { + return "", err + } + if res.Status == proto.ActionStatus_V1_FAILURE { + return "", fmt.Errorf(res.Error) + } + return res.Version, nil +} diff --git a/internal/pkg/agent/control/client/dial.go b/internal/pkg/agent/control/v1/client/dial.go similarity index 73% rename from internal/pkg/agent/control/client/dial.go rename to internal/pkg/agent/control/v1/client/dial.go index 2b5d1cd0488..955315a9f34 100644 --- a/internal/pkg/agent/control/client/dial.go +++ b/internal/pkg/agent/control/v1/client/dial.go @@ -15,10 +15,16 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/control" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ) func dialContext(ctx context.Context) (*grpc.ClientConn, error) { - return grpc.DialContext(ctx, strings.TrimPrefix(control.Address(), "unix://"), grpc.WithInsecure(), grpc.WithContextDialer(dialer)) + return grpc.DialContext( + ctx, + strings.TrimPrefix(control.Address(), "unix://"), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialer), + ) } func dialer(ctx context.Context, addr string) (net.Conn, error) { diff --git a/internal/pkg/agent/control/client/dial_windows.go b/internal/pkg/agent/control/v1/client/dial_windows.go similarity index 76% rename from internal/pkg/agent/control/client/dial_windows.go rename to internal/pkg/agent/control/v1/client/dial_windows.go index 503c785fd8e..ef798535e3a 100644 --- a/internal/pkg/agent/control/client/dial_windows.go +++ b/internal/pkg/agent/control/v1/client/dial_windows.go @@ -12,13 +12,19 @@ import ( "net" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "github.com/elastic/elastic-agent-libs/api/npipe" "github.com/elastic/elastic-agent/internal/pkg/agent/control" ) func dialContext(ctx context.Context) (*grpc.ClientConn, error) { - return grpc.DialContext(ctx, control.Address(), grpc.WithInsecure(), grpc.WithContextDialer(dialer)) + return grpc.DialContext( + ctx, + control.Address(), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialer), + ) } func dialer(ctx context.Context, addr string) (net.Conn, error) { diff --git a/internal/pkg/agent/control/v1/proto/control_v1.pb.go b/internal/pkg/agent/control/v1/proto/control_v1.pb.go new file mode 100644 index 00000000000..fd3902a4a6e --- /dev/null +++ b/internal/pkg/agent/control/v1/proto/control_v1.pb.go @@ -0,0 +1,838 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.9 +// source: control_v1.proto + +// proto namespace/package name is shared with elastic-agent-client +// we need to be careful with modifications to avoid name collisions +// proto is here to maintain backward compatibility and cannot be changed. 
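A minimal usage sketch of the v1 control client defined above (error handling shortened; the surrounding context is assumed):

    ctx := context.Background()
    c := client.New()
    if err := c.Connect(ctx); err != nil {
        return err
    }
    defer c.Disconnect()

    status, err := c.Status(ctx)
    if err != nil {
        return err
    }
    fmt.Println(status.Status, status.Message)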
+// elastic-agent-client namespace is likely change after 8.6 + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Status codes for the current state. +type Status int32 + +const ( + Status_V1_STARTING Status = 0 + Status_V1_CONFIGURING Status = 1 + Status_V1_HEALTHY Status = 2 + Status_V1_DEGRADED Status = 3 + Status_V1_FAILED Status = 4 + Status_V1_STOPPING Status = 5 + Status_V1_UPGRADING Status = 6 + Status_V1_SROLLBACK Status = 7 +) + +// Enum value maps for Status. +var ( + Status_name = map[int32]string{ + 0: "V1_STARTING", + 1: "V1_CONFIGURING", + 2: "V1_HEALTHY", + 3: "V1_DEGRADED", + 4: "V1_FAILED", + 5: "V1_STOPPING", + 6: "V1_UPGRADING", + 7: "V1_SROLLBACK", + } + Status_value = map[string]int32{ + "V1_STARTING": 0, + "V1_CONFIGURING": 1, + "V1_HEALTHY": 2, + "V1_DEGRADED": 3, + "V1_FAILED": 4, + "V1_STOPPING": 5, + "V1_UPGRADING": 6, + "V1_SROLLBACK": 7, + } +) + +func (x Status) Enum() *Status { + p := new(Status) + *p = x + return p +} + +func (x Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Status) Descriptor() protoreflect.EnumDescriptor { + return file_control_v1_proto_enumTypes[0].Descriptor() +} + +func (Status) Type() protoreflect.EnumType { + return &file_control_v1_proto_enumTypes[0] +} + +func (x Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Status.Descriptor instead. +func (Status) EnumDescriptor() ([]byte, []int) { + return file_control_v1_proto_rawDescGZIP(), []int{0} +} + +// Action status codes for restart and upgrade response. +type ActionStatus int32 + +const ( + // Action was successful. + ActionStatus_V1_SUCCESS ActionStatus = 0 + // Action failed. + ActionStatus_V1_FAILURE ActionStatus = 1 +) + +// Enum value maps for ActionStatus. +var ( + ActionStatus_name = map[int32]string{ + 0: "V1_SUCCESS", + 1: "V1_FAILURE", + } + ActionStatus_value = map[string]int32{ + "V1_SUCCESS": 0, + "V1_FAILURE": 1, + } +) + +func (x ActionStatus) Enum() *ActionStatus { + p := new(ActionStatus) + *p = x + return p +} + +func (x ActionStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ActionStatus) Descriptor() protoreflect.EnumDescriptor { + return file_control_v1_proto_enumTypes[1].Descriptor() +} + +func (ActionStatus) Type() protoreflect.EnumType { + return &file_control_v1_proto_enumTypes[1] +} + +func (x ActionStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ActionStatus.Descriptor instead. +func (ActionStatus) EnumDescriptor() ([]byte, []int) { + return file_control_v1_proto_rawDescGZIP(), []int{1} +} + +// Empty message. 
+type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_control_v1_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_control_v1_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. +func (*Empty) Descriptor() ([]byte, []int) { + return file_control_v1_proto_rawDescGZIP(), []int{0} +} + +// Version response message. +type VersionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Current running version. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Current running commit. + Commit string `protobuf:"bytes,2,opt,name=commit,proto3" json:"commit,omitempty"` + // Current running build time. + BuildTime string `protobuf:"bytes,3,opt,name=buildTime,proto3" json:"buildTime,omitempty"` + // Current running version is a snapshot. + Snapshot bool `protobuf:"varint,4,opt,name=snapshot,proto3" json:"snapshot,omitempty"` +} + +func (x *VersionResponse) Reset() { + *x = VersionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_control_v1_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VersionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersionResponse) ProtoMessage() {} + +func (x *VersionResponse) ProtoReflect() protoreflect.Message { + mi := &file_control_v1_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersionResponse.ProtoReflect.Descriptor instead. +func (*VersionResponse) Descriptor() ([]byte, []int) { + return file_control_v1_proto_rawDescGZIP(), []int{1} +} + +func (x *VersionResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *VersionResponse) GetCommit() string { + if x != nil { + return x.Commit + } + return "" +} + +func (x *VersionResponse) GetBuildTime() string { + if x != nil { + return x.BuildTime + } + return "" +} + +func (x *VersionResponse) GetSnapshot() bool { + if x != nil { + return x.Snapshot + } + return false +} + +type RestartResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Response status. + Status ActionStatus `protobuf:"varint,1,opt,name=status,proto3,enum=proto.ActionStatus" json:"status,omitempty"` + // Error message when it fails to trigger restart. 
+ Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *RestartResponse) Reset() { + *x = RestartResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_control_v1_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestartResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestartResponse) ProtoMessage() {} + +func (x *RestartResponse) ProtoReflect() protoreflect.Message { + mi := &file_control_v1_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestartResponse.ProtoReflect.Descriptor instead. +func (*RestartResponse) Descriptor() ([]byte, []int) { + return file_control_v1_proto_rawDescGZIP(), []int{2} +} + +func (x *RestartResponse) GetStatus() ActionStatus { + if x != nil { + return x.Status + } + return ActionStatus_V1_SUCCESS +} + +func (x *RestartResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +// Upgrade request message. +type UpgradeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // (Optional) Version to upgrade to. + // + // If not provided Elastic Agent will auto discover the latest version in the same major + // to upgrade to. If wanting to upgrade to a new major that major must be present in the + // this version field. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // (Optional) Use a different source URI then configured. + // + // If provided the upgrade process will use the provided sourceURI instead of the configured + // sourceURI in the configuration. + SourceURI string `protobuf:"bytes,2,opt,name=sourceURI,proto3" json:"sourceURI,omitempty"` +} + +func (x *UpgradeRequest) Reset() { + *x = UpgradeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_control_v1_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeRequest) ProtoMessage() {} + +func (x *UpgradeRequest) ProtoReflect() protoreflect.Message { + mi := &file_control_v1_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeRequest.ProtoReflect.Descriptor instead. +func (*UpgradeRequest) Descriptor() ([]byte, []int) { + return file_control_v1_proto_rawDescGZIP(), []int{3} +} + +func (x *UpgradeRequest) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *UpgradeRequest) GetSourceURI() string { + if x != nil { + return x.SourceURI + } + return "" +} + +// A upgrade response message. +type UpgradeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Response status. + Status ActionStatus `protobuf:"varint,1,opt,name=status,proto3,enum=proto.ActionStatus" json:"status,omitempty"` + // Version that is being upgraded to. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // Error message when it fails to trigger upgrade. 
+ Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *UpgradeResponse) Reset() { + *x = UpgradeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_control_v1_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResponse) ProtoMessage() {} + +func (x *UpgradeResponse) ProtoReflect() protoreflect.Message { + mi := &file_control_v1_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResponse.ProtoReflect.Descriptor instead. +func (*UpgradeResponse) Descriptor() ([]byte, []int) { + return file_control_v1_proto_rawDescGZIP(), []int{4} +} + +func (x *UpgradeResponse) GetStatus() ActionStatus { + if x != nil { + return x.Status + } + return ActionStatus_V1_SUCCESS +} + +func (x *UpgradeResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *UpgradeResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +// Current status of the application in Elastic Agent. +type ApplicationStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique application ID. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Application name. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Current status. + Status Status `protobuf:"varint,3,opt,name=status,proto3,enum=proto.Status" json:"status,omitempty"` + // Current status message. + Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` + // Current status payload. + Payload string `protobuf:"bytes,5,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (x *ApplicationStatus) Reset() { + *x = ApplicationStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_control_v1_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplicationStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplicationStatus) ProtoMessage() {} + +func (x *ApplicationStatus) ProtoReflect() protoreflect.Message { + mi := &file_control_v1_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplicationStatus.ProtoReflect.Descriptor instead. +func (*ApplicationStatus) Descriptor() ([]byte, []int) { + return file_control_v1_proto_rawDescGZIP(), []int{5} +} + +func (x *ApplicationStatus) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ApplicationStatus) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ApplicationStatus) GetStatus() Status { + if x != nil { + return x.Status + } + return Status_V1_STARTING +} + +func (x *ApplicationStatus) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *ApplicationStatus) GetPayload() string { + if x != nil { + return x.Payload + } + return "" +} + +// Status is the current status of Elastic Agent. 
+type StatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Overall status of Elastic Agent. + Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=proto.Status" json:"status,omitempty"` + // Overall status message of Elastic Agent. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Status of each application in Elastic Agent. + Applications []*ApplicationStatus `protobuf:"bytes,3,rep,name=applications,proto3" json:"applications,omitempty"` +} + +func (x *StatusResponse) Reset() { + *x = StatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_control_v1_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse) ProtoMessage() {} + +func (x *StatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_control_v1_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. +func (*StatusResponse) Descriptor() ([]byte, []int) { + return file_control_v1_proto_rawDescGZIP(), []int{6} +} + +func (x *StatusResponse) GetStatus() Status { + if x != nil { + return x.Status + } + return Status_V1_STARTING +} + +func (x *StatusResponse) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *StatusResponse) GetApplications() []*ApplicationStatus { + if x != nil { + return x.Applications + } + return nil +} + +var File_control_v1_proto protoreflect.FileDescriptor + +var file_control_v1_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x76, 0x31, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x7d, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x22, 0x54, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x48, 0x0a, 0x0e, 0x55, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 
0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x49, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, + 0x49, 0x22, 0x6e, 0x0a, 0x0f, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x22, 0x92, 0x01, 0x0a, 0x11, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x8f, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3c, 0x0a, 0x0c, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2a, 0x92, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x0f, 0x0a, 0x0b, 0x56, 0x31, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, + 0x4e, 0x47, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x56, 0x31, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, + 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x56, 0x31, 0x5f, 0x48, + 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x56, 0x31, 0x5f, 0x44, + 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x56, 0x31, 0x5f, + 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x56, 0x31, 0x5f, 0x53, + 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x56, 0x31, 0x5f, + 0x55, 0x50, 0x47, 
0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x56, + 0x31, 0x5f, 0x53, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x07, 0x2a, 0x2e, 0x0a, + 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0e, 0x0a, + 0x0a, 0x56, 0x31, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0e, 0x0a, + 0x0a, 0x56, 0x31, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x32, 0xe0, 0x01, + 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x2f, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x12, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x42, 0x22, 0x5a, 0x1d, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3b, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_control_v1_proto_rawDescOnce sync.Once + file_control_v1_proto_rawDescData = file_control_v1_proto_rawDesc +) + +func file_control_v1_proto_rawDescGZIP() []byte { + file_control_v1_proto_rawDescOnce.Do(func() { + file_control_v1_proto_rawDescData = protoimpl.X.CompressGZIP(file_control_v1_proto_rawDescData) + }) + return file_control_v1_proto_rawDescData +} + +var file_control_v1_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_control_v1_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_control_v1_proto_goTypes = []interface{}{ + (Status)(0), // 0: proto.Status + (ActionStatus)(0), // 1: proto.ActionStatus + (*Empty)(nil), // 2: proto.Empty + (*VersionResponse)(nil), // 3: proto.VersionResponse + (*RestartResponse)(nil), // 4: proto.RestartResponse + (*UpgradeRequest)(nil), // 5: proto.UpgradeRequest + (*UpgradeResponse)(nil), // 6: proto.UpgradeResponse + (*ApplicationStatus)(nil), // 7: proto.ApplicationStatus + (*StatusResponse)(nil), // 8: proto.StatusResponse +} +var file_control_v1_proto_depIdxs = []int32{ + 1, // 0: proto.RestartResponse.status:type_name -> proto.ActionStatus + 1, // 1: proto.UpgradeResponse.status:type_name -> proto.ActionStatus + 0, // 2: proto.ApplicationStatus.status:type_name -> proto.Status + 0, // 3: proto.StatusResponse.status:type_name -> proto.Status + 7, // 4: proto.StatusResponse.applications:type_name -> proto.ApplicationStatus + 2, // 5: 
proto.ElasticAgentControl.Version:input_type -> proto.Empty + 2, // 6: proto.ElasticAgentControl.Status:input_type -> proto.Empty + 2, // 7: proto.ElasticAgentControl.Restart:input_type -> proto.Empty + 5, // 8: proto.ElasticAgentControl.Upgrade:input_type -> proto.UpgradeRequest + 3, // 9: proto.ElasticAgentControl.Version:output_type -> proto.VersionResponse + 8, // 10: proto.ElasticAgentControl.Status:output_type -> proto.StatusResponse + 4, // 11: proto.ElasticAgentControl.Restart:output_type -> proto.RestartResponse + 6, // 12: proto.ElasticAgentControl.Upgrade:output_type -> proto.UpgradeResponse + 9, // [9:13] is the sub-list for method output_type + 5, // [5:9] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_control_v1_proto_init() } +func file_control_v1_proto_init() { + if File_control_v1_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_control_v1_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_control_v1_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_control_v1_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestartResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_control_v1_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_control_v1_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_control_v1_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplicationStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_control_v1_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_control_v1_proto_rawDesc, + NumEnums: 2, + NumMessages: 7, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_control_v1_proto_goTypes, + DependencyIndexes: file_control_v1_proto_depIdxs, + EnumInfos: file_control_v1_proto_enumTypes, + MessageInfos: file_control_v1_proto_msgTypes, + }.Build() + File_control_v1_proto = out.File + file_control_v1_proto_rawDesc = nil + file_control_v1_proto_goTypes = nil + file_control_v1_proto_depIdxs = nil +} diff --git a/internal/pkg/agent/control/v1/proto/control_v1_grpc.pb.go 
b/internal/pkg/agent/control/v1/proto/control_v1_grpc.pb.go new file mode 100644 index 00000000000..6264d2d81bf --- /dev/null +++ b/internal/pkg/agent/control/v1/proto/control_v1_grpc.pb.go @@ -0,0 +1,221 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.21.9 +// source: control_v1.proto + +package proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ElasticAgentControlClient is the client API for ElasticAgentControl service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ElasticAgentControlClient interface { + // Fetches the currently running version of the Elastic Agent. + Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionResponse, error) + // Fetches the currently status of the Elastic Agent. + Status(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StatusResponse, error) + // Restart restarts the current running Elastic Agent. + Restart(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*RestartResponse, error) + // Upgrade starts the upgrade process of Elastic Agent. + Upgrade(ctx context.Context, in *UpgradeRequest, opts ...grpc.CallOption) (*UpgradeResponse, error) +} + +type elasticAgentControlClient struct { + cc grpc.ClientConnInterface +} + +func NewElasticAgentControlClient(cc grpc.ClientConnInterface) ElasticAgentControlClient { + return &elasticAgentControlClient{cc} +} + +func (c *elasticAgentControlClient) Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionResponse, error) { + out := new(VersionResponse) + err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/Version", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *elasticAgentControlClient) Status(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StatusResponse, error) { + out := new(StatusResponse) + err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/Status", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *elasticAgentControlClient) Restart(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*RestartResponse, error) { + out := new(RestartResponse) + err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/Restart", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *elasticAgentControlClient) Upgrade(ctx context.Context, in *UpgradeRequest, opts ...grpc.CallOption) (*UpgradeResponse, error) { + out := new(UpgradeResponse) + err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/Upgrade", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ElasticAgentControlServer is the server API for ElasticAgentControl service. +// All implementations must embed UnimplementedElasticAgentControlServer +// for forward compatibility +type ElasticAgentControlServer interface { + // Fetches the currently running version of the Elastic Agent. + Version(context.Context, *Empty) (*VersionResponse, error) + // Fetches the currently status of the Elastic Agent. 
+ Status(context.Context, *Empty) (*StatusResponse, error) + // Restart restarts the current running Elastic Agent. + Restart(context.Context, *Empty) (*RestartResponse, error) + // Upgrade starts the upgrade process of Elastic Agent. + Upgrade(context.Context, *UpgradeRequest) (*UpgradeResponse, error) + mustEmbedUnimplementedElasticAgentControlServer() +} + +// UnimplementedElasticAgentControlServer must be embedded to have forward compatible implementations. +type UnimplementedElasticAgentControlServer struct { +} + +func (UnimplementedElasticAgentControlServer) Version(context.Context, *Empty) (*VersionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") +} +func (UnimplementedElasticAgentControlServer) Status(context.Context, *Empty) (*StatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") +} +func (UnimplementedElasticAgentControlServer) Restart(context.Context, *Empty) (*RestartResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Restart not implemented") +} +func (UnimplementedElasticAgentControlServer) Upgrade(context.Context, *UpgradeRequest) (*UpgradeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Upgrade not implemented") +} +func (UnimplementedElasticAgentControlServer) mustEmbedUnimplementedElasticAgentControlServer() {} + +// UnsafeElasticAgentControlServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ElasticAgentControlServer will +// result in compilation errors. +type UnsafeElasticAgentControlServer interface { + mustEmbedUnimplementedElasticAgentControlServer() +} + +func RegisterElasticAgentControlServer(s grpc.ServiceRegistrar, srv ElasticAgentControlServer) { + s.RegisterService(&ElasticAgentControl_ServiceDesc, srv) +} + +func _ElasticAgentControl_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElasticAgentControlServer).Version(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.ElasticAgentControl/Version", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElasticAgentControlServer).Version(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _ElasticAgentControl_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElasticAgentControlServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.ElasticAgentControl/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElasticAgentControlServer).Status(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _ElasticAgentControl_Restart_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElasticAgentControlServer).Restart(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.ElasticAgentControl/Restart", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElasticAgentControlServer).Restart(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _ElasticAgentControl_Upgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpgradeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElasticAgentControlServer).Upgrade(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.ElasticAgentControl/Upgrade", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElasticAgentControlServer).Upgrade(ctx, req.(*UpgradeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ElasticAgentControl_ServiceDesc is the grpc.ServiceDesc for ElasticAgentControl service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ElasticAgentControl_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "proto.ElasticAgentControl", + HandlerType: (*ElasticAgentControlServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Version", + Handler: _ElasticAgentControl_Version_Handler, + }, + { + MethodName: "Status", + Handler: _ElasticAgentControl_Status_Handler, + }, + { + MethodName: "Restart", + Handler: _ElasticAgentControl_Restart_Handler, + }, + { + MethodName: "Upgrade", + Handler: _ElasticAgentControl_Upgrade_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "control_v1.proto", +} diff --git a/internal/pkg/agent/control/v1/server/server.go b/internal/pkg/agent/control/v1/server/server.go new file mode 100644 index 00000000000..e8baa096bdb --- /dev/null +++ b/internal/pkg/agent/control/v1/server/server.go @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package server + +import ( + "context" + + "go.elastic.co/apm" + + "github.com/elastic/elastic-agent/internal/pkg/agent/control" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v1/proto" + v2proto "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/cproto" + "github.com/elastic/elastic-agent/internal/pkg/release" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +type serverV2 interface { + State(_ context.Context, _ *v2proto.Empty) (*v2proto.StateResponse, error) + Restart(_ context.Context, _ *v2proto.Empty) (*v2proto.RestartResponse, error) + Upgrade(_ context.Context, _ *v2proto.UpgradeRequest) (*v2proto.UpgradeResponse, error) +} + +// Server is the daemon side of the control protocol. +type Server struct { + proto.UnimplementedElasticAgentControlServer + + logger *logger.Logger + v2Server serverV2 + tracer *apm.Tracer +} + +// New creates a new control protocol server. +func New(log *logger.Logger, v2Server serverV2, tracer *apm.Tracer) *Server { + return &Server{ + logger: log, + v2Server: v2Server, + tracer: tracer, + } +} + +// Version returns the currently running version. 
+func (s *Server) Version(ctx context.Context, _ *proto.Empty) (*proto.VersionResponse, error) { + return &proto.VersionResponse{ + Version: release.Version(), + Commit: release.Commit(), + BuildTime: release.BuildTime().Format(control.TimeFormat()), + Snapshot: release.Snapshot(), + }, nil +} + +// Status returns the overall status of the agent. +func (s *Server) Status(ctx context.Context, _ *proto.Empty) (*proto.StatusResponse, error) { + resp, err := s.v2Server.State(ctx, &v2proto.Empty{}) + if err != nil { + return nil, err + } + + return &proto.StatusResponse{ + Status: agentStateToProto(resp.State), + Message: resp.Message, + Applications: componentStateToProto(resp.Components), + }, nil +} + +// Restart performs re-exec. +func (s *Server) Restart(ctx context.Context, _ *proto.Empty) (*proto.RestartResponse, error) { + _, err := s.v2Server.Restart(ctx, &v2proto.Empty{}) + return &proto.RestartResponse{Status: proto.ActionStatus_V1_SUCCESS}, err + +} + +// Upgrade performs the upgrade operation. +func (s *Server) Upgrade(ctx context.Context, request *proto.UpgradeRequest) (*proto.UpgradeResponse, error) { + resp, _ := s.v2Server.Upgrade(ctx, &v2proto.UpgradeRequest{ + Version: request.Version, + SourceURI: request.SourceURI, + }) + + if resp.Status == v2proto.ActionStatus_FAILURE { + return &proto.UpgradeResponse{ + Status: proto.ActionStatus_V1_FAILURE, + Version: resp.Error, + }, nil + } + + return &proto.UpgradeResponse{ + Status: proto.ActionStatus_V1_SUCCESS, + Version: request.Version, + }, nil +} + +func agentStateToProto(state v2proto.State) proto.Status { + if state == v2proto.State_DEGRADED { + return proto.Status_V1_DEGRADED + } + if state == v2proto.State_FAILED { + return proto.Status_V1_FAILED + } + return proto.Status_V1_HEALTHY +} + +func componentStateToProto(components []*v2proto.ComponentState) []*proto.ApplicationStatus { + s := make([]*proto.ApplicationStatus, len(components)) + for i, c := range components { + compState := agentStateToProto(c.State) + // respect unhealthy unit state + // unit state can be worse than a component one + if compState == proto.Status_V1_HEALTHY { + for _, u := range c.Units { + if unitState := agentStateToProto(u.State); unitState != proto.Status_V1_HEALTHY { + compState = unitState + } + } + } + + s[i] = &proto.ApplicationStatus{ + Id: c.Id, + Name: c.Name, + Status: compState, + Message: c.Message, + Payload: "", + } + } + return s +} diff --git a/internal/pkg/agent/control/client/client.go b/internal/pkg/agent/control/v2/client/client.go similarity index 90% rename from internal/pkg/agent/control/client/client.go rename to internal/pkg/agent/control/v2/client/client.go index 87440c54141..decce6b6909 100644 --- a/internal/pkg/agent/control/client/client.go +++ b/internal/pkg/agent/control/v2/client/client.go @@ -9,11 +9,13 @@ import ( "encoding/json" "errors" "fmt" + "io" "sync" "time" + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/control" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" + cproto "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/cproto" ) // UnitType is the type of the unit @@ -152,21 +154,29 @@ type Client interface { // client manages the state and communication to the Elastic Agent. 
type client struct { - ctx context.Context - cancel context.CancelFunc - wg sync.WaitGroup - client cproto.ElasticAgentControlClient + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + client cproto.ElasticAgentControlClient + grpcConfig *configuration.GRPCConfig } -// New creates a client connection to Elastic Agent. +// New creates a client connection to Elastic Agent. It uses default grpc configuration for client initialization. func New() Client { - return &client{} + return NewWithConfig(configuration.DefaultGRPCConfig()) +} + +// NewWithConfig creates a client connection to Elastic Agent. +func NewWithConfig(grpcConfig *configuration.GRPCConfig) Client { + return &client{ + grpcConfig: grpcConfig, + } } // Connect connects to the running Elastic Agent. func (c *client) Connect(ctx context.Context) error { c.ctx, c.cancel = context.WithCancel(ctx) - conn, err := dialContext(ctx) + conn, err := dialContext(ctx, c.grpcConfig) if err != nil { return err } @@ -316,13 +326,22 @@ func (c *client) DiagnosticUnits(ctx context.Context, units ...DiagnosticUnitReq }) } - resp, err := c.client.DiagnosticUnits(ctx, &cproto.DiagnosticUnitsRequest{Units: reqs}) + respStream, err := c.client.DiagnosticUnits(ctx, &cproto.DiagnosticUnitsRequest{Units: reqs}) if err != nil { return nil, err } - results := make([]DiagnosticUnitResult, 0, len(resp.Units)) - for _, u := range resp.Units { + results := make([]DiagnosticUnitResult, 0) + for { + var u *cproto.DiagnosticUnitResponse + u, err = respStream.Recv() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, fmt.Errorf("failed to retrieve unit diagnostics: %w", err) + } + files := make([]DiagnosticFileResult, 0, len(u.Results)) for _, f := range u.Results { files = append(files, DiagnosticFileResult{ @@ -346,5 +365,6 @@ func (c *client) DiagnosticUnits(ctx context.Context, units ...DiagnosticUnitReq Results: files, }) } + return results, nil } diff --git a/internal/pkg/agent/control/v2/client/dial.go b/internal/pkg/agent/control/v2/client/dial.go new file mode 100644 index 00000000000..f27c0d05c64 --- /dev/null +++ b/internal/pkg/agent/control/v2/client/dial.go @@ -0,0 +1,35 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build !windows +// +build !windows + +package client + +import ( + "context" + "net" + "strings" + + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" + "github.com/elastic/elastic-agent/internal/pkg/agent/control" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +func dialContext(ctx context.Context, grpcConfig *configuration.GRPCConfig) (*grpc.ClientConn, error) { + return grpc.DialContext( + ctx, + strings.TrimPrefix(control.Address(), "unix://"), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialer), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcConfig.MaxMsgSize)), + ) +} + +func dialer(ctx context.Context, addr string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, "unix", addr) +} diff --git a/internal/pkg/agent/control/v2/client/dial_windows.go b/internal/pkg/agent/control/v2/client/dial_windows.go new file mode 100644 index 00000000000..577c39134ca --- /dev/null +++ b/internal/pkg/agent/control/v2/client/dial_windows.go @@ -0,0 +1,34 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows +// +build windows + +package client + +import ( + "context" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/elastic/elastic-agent-libs/api/npipe" + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" + "github.com/elastic/elastic-agent/internal/pkg/agent/control" +) + +func dialContext(ctx context.Context, grpcConfig *configuration.GRPCConfig) (*grpc.ClientConn, error) { + return grpc.DialContext( + ctx, + control.Address(), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialer), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcConfig.MaxMsgSize)), + ) +} + +func dialer(ctx context.Context, addr string) (net.Conn, error) { + return npipe.DialContext(addr)(ctx, "", "") +} diff --git a/internal/pkg/agent/control/control_test.go b/internal/pkg/agent/control/v2/control_test.go similarity index 82% rename from internal/pkg/agent/control/control_test.go rename to internal/pkg/agent/control/v2/control_test.go index 3937b374a36..f4ec7a26452 100644 --- a/internal/pkg/agent/control/control_test.go +++ b/internal/pkg/agent/control/v2/control_test.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package control_test +package v1_test import ( "context" @@ -14,14 +14,15 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/server" + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/server" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) func TestServerClient_Version(t *testing.T) { - srv := server.New(newErrorLogger(t), nil, nil, apmtest.DiscardTracer, nil) + srv := server.New(newErrorLogger(t), nil, nil, apmtest.DiscardTracer, nil, configuration.DefaultGRPCConfig()) err := srv.Start() require.NoError(t, err) defer srv.Stop() diff --git a/internal/pkg/agent/control/cproto/control.pb.go b/internal/pkg/agent/control/v2/cproto/control_v2.pb.go similarity index 64% rename from internal/pkg/agent/control/cproto/control.pb.go rename to internal/pkg/agent/control/v2/cproto/control_v2.pb.go index 01588cfa4c1..3e998191173 100644 --- a/internal/pkg/agent/control/cproto/control.pb.go +++ b/internal/pkg/agent/control/v2/cproto/control_v2.pb.go @@ -5,18 +5,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.5 -// source: control.proto +// protoc v3.21.9 +// source: control_v2.proto package cproto import ( - reflect "reflect" - sync "sync" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" ) const ( @@ -78,11 +77,11 @@ func (x State) String() string { } func (State) Descriptor() protoreflect.EnumDescriptor { - return file_control_proto_enumTypes[0].Descriptor() + return file_control_v2_proto_enumTypes[0].Descriptor() } func (State) Type() protoreflect.EnumType { - return &file_control_proto_enumTypes[0] + return &file_control_v2_proto_enumTypes[0] } func (x State) Number() protoreflect.EnumNumber { @@ -91,7 +90,7 @@ func (x State) Number() protoreflect.EnumNumber { // Deprecated: Use State.Descriptor instead. func (State) EnumDescriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{0} + return file_control_v2_proto_rawDescGZIP(), []int{0} } // Unit Type running inside a component. @@ -125,11 +124,11 @@ func (x UnitType) String() string { } func (UnitType) Descriptor() protoreflect.EnumDescriptor { - return file_control_proto_enumTypes[1].Descriptor() + return file_control_v2_proto_enumTypes[1].Descriptor() } func (UnitType) Type() protoreflect.EnumType { - return &file_control_proto_enumTypes[1] + return &file_control_v2_proto_enumTypes[1] } func (x UnitType) Number() protoreflect.EnumNumber { @@ -138,7 +137,7 @@ func (x UnitType) Number() protoreflect.EnumNumber { // Deprecated: Use UnitType.Descriptor instead. func (UnitType) EnumDescriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{1} + return file_control_v2_proto_rawDescGZIP(), []int{1} } // Action status codes for restart and upgrade response. @@ -174,11 +173,11 @@ func (x ActionStatus) String() string { } func (ActionStatus) Descriptor() protoreflect.EnumDescriptor { - return file_control_proto_enumTypes[2].Descriptor() + return file_control_v2_proto_enumTypes[2].Descriptor() } func (ActionStatus) Type() protoreflect.EnumType { - return &file_control_proto_enumTypes[2] + return &file_control_v2_proto_enumTypes[2] } func (x ActionStatus) Number() protoreflect.EnumNumber { @@ -187,7 +186,7 @@ func (x ActionStatus) Number() protoreflect.EnumNumber { // Deprecated: Use ActionStatus.Descriptor instead. func (ActionStatus) EnumDescriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{2} + return file_control_v2_proto_rawDescGZIP(), []int{2} } // pprof endpoint that can be requested. @@ -242,11 +241,11 @@ func (x PprofOption) String() string { } func (PprofOption) Descriptor() protoreflect.EnumDescriptor { - return file_control_proto_enumTypes[3].Descriptor() + return file_control_v2_proto_enumTypes[3].Descriptor() } func (PprofOption) Type() protoreflect.EnumType { - return &file_control_proto_enumTypes[3] + return &file_control_v2_proto_enumTypes[3] } func (x PprofOption) Number() protoreflect.EnumNumber { @@ -255,7 +254,7 @@ func (x PprofOption) Number() protoreflect.EnumNumber { // Deprecated: Use PprofOption.Descriptor instead. func (PprofOption) EnumDescriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{3} + return file_control_v2_proto_rawDescGZIP(), []int{3} } // Empty message. 
@@ -268,7 +267,7 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[0] + mi := &file_control_v2_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -281,7 +280,7 @@ func (x *Empty) String() string { func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[0] + mi := &file_control_v2_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -294,7 +293,7 @@ func (x *Empty) ProtoReflect() protoreflect.Message { // Deprecated: Use Empty.ProtoReflect.Descriptor instead. func (*Empty) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{0} + return file_control_v2_proto_rawDescGZIP(), []int{0} } // Version response message. @@ -316,7 +315,7 @@ type VersionResponse struct { func (x *VersionResponse) Reset() { *x = VersionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[1] + mi := &file_control_v2_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -329,7 +328,7 @@ func (x *VersionResponse) String() string { func (*VersionResponse) ProtoMessage() {} func (x *VersionResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[1] + mi := &file_control_v2_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -342,7 +341,7 @@ func (x *VersionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VersionResponse.ProtoReflect.Descriptor instead. func (*VersionResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{1} + return file_control_v2_proto_rawDescGZIP(), []int{1} } func (x *VersionResponse) GetVersion() string { @@ -387,7 +386,7 @@ type RestartResponse struct { func (x *RestartResponse) Reset() { *x = RestartResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[2] + mi := &file_control_v2_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -400,7 +399,7 @@ func (x *RestartResponse) String() string { func (*RestartResponse) ProtoMessage() {} func (x *RestartResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[2] + mi := &file_control_v2_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -413,7 +412,7 @@ func (x *RestartResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RestartResponse.ProtoReflect.Descriptor instead. 
func (*RestartResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{2} + return file_control_v2_proto_rawDescGZIP(), []int{2} } func (x *RestartResponse) GetStatus() ActionStatus { @@ -452,7 +451,7 @@ type UpgradeRequest struct { func (x *UpgradeRequest) Reset() { *x = UpgradeRequest{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[3] + mi := &file_control_v2_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -465,7 +464,7 @@ func (x *UpgradeRequest) String() string { func (*UpgradeRequest) ProtoMessage() {} func (x *UpgradeRequest) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[3] + mi := &file_control_v2_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -478,7 +477,7 @@ func (x *UpgradeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpgradeRequest.ProtoReflect.Descriptor instead. func (*UpgradeRequest) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{3} + return file_control_v2_proto_rawDescGZIP(), []int{3} } func (x *UpgradeRequest) GetVersion() string { @@ -512,7 +511,7 @@ type UpgradeResponse struct { func (x *UpgradeResponse) Reset() { *x = UpgradeResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[4] + mi := &file_control_v2_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -525,7 +524,7 @@ func (x *UpgradeResponse) String() string { func (*UpgradeResponse) ProtoMessage() {} func (x *UpgradeResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[4] + mi := &file_control_v2_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -538,7 +537,7 @@ func (x *UpgradeResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use UpgradeResponse.ProtoReflect.Descriptor instead. func (*UpgradeResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{4} + return file_control_v2_proto_rawDescGZIP(), []int{4} } func (x *UpgradeResponse) GetStatus() ActionStatus { @@ -582,7 +581,7 @@ type ComponentUnitState struct { func (x *ComponentUnitState) Reset() { *x = ComponentUnitState{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[5] + mi := &file_control_v2_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -595,7 +594,7 @@ func (x *ComponentUnitState) String() string { func (*ComponentUnitState) ProtoMessage() {} func (x *ComponentUnitState) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[5] + mi := &file_control_v2_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -608,7 +607,7 @@ func (x *ComponentUnitState) ProtoReflect() protoreflect.Message { // Deprecated: Use ComponentUnitState.ProtoReflect.Descriptor instead. 
func (*ComponentUnitState) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{5} + return file_control_v2_proto_rawDescGZIP(), []int{5} } func (x *ComponentUnitState) GetUnitType() UnitType { @@ -663,7 +662,7 @@ type ComponentVersionInfo struct { func (x *ComponentVersionInfo) Reset() { *x = ComponentVersionInfo{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[6] + mi := &file_control_v2_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -676,7 +675,7 @@ func (x *ComponentVersionInfo) String() string { func (*ComponentVersionInfo) ProtoMessage() {} func (x *ComponentVersionInfo) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[6] + mi := &file_control_v2_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -689,7 +688,7 @@ func (x *ComponentVersionInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use ComponentVersionInfo.ProtoReflect.Descriptor instead. func (*ComponentVersionInfo) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{6} + return file_control_v2_proto_rawDescGZIP(), []int{6} } func (x *ComponentVersionInfo) GetName() string { @@ -736,7 +735,7 @@ type ComponentState struct { func (x *ComponentState) Reset() { *x = ComponentState{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[7] + mi := &file_control_v2_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -749,7 +748,7 @@ func (x *ComponentState) String() string { func (*ComponentState) ProtoMessage() {} func (x *ComponentState) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[7] + mi := &file_control_v2_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -762,7 +761,7 @@ func (x *ComponentState) ProtoReflect() protoreflect.Message { // Deprecated: Use ComponentState.ProtoReflect.Descriptor instead. func (*ComponentState) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{7} + return file_control_v2_proto_rawDescGZIP(), []int{7} } func (x *ComponentState) GetId() string { @@ -827,7 +826,7 @@ type StateAgentInfo struct { func (x *StateAgentInfo) Reset() { *x = StateAgentInfo{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[8] + mi := &file_control_v2_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -840,7 +839,7 @@ func (x *StateAgentInfo) String() string { func (*StateAgentInfo) ProtoMessage() {} func (x *StateAgentInfo) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[8] + mi := &file_control_v2_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -853,7 +852,7 @@ func (x *StateAgentInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use StateAgentInfo.ProtoReflect.Descriptor instead. func (*StateAgentInfo) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{8} + return file_control_v2_proto_rawDescGZIP(), []int{8} } func (x *StateAgentInfo) GetId() string { @@ -901,16 +900,16 @@ type StateResponse struct { Info *StateAgentInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` // Overall state of Elastic Agent. 
State State `protobuf:"varint,2,opt,name=state,proto3,enum=cproto.State" json:"state,omitempty"` - // Overall status message of Elastic Agent. + // Overall state message of Elastic Agent. Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` - // Status of each component in Elastic Agent. + // State of each component in Elastic Agent. Components []*ComponentState `protobuf:"bytes,4,rep,name=components,proto3" json:"components,omitempty"` } func (x *StateResponse) Reset() { *x = StateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[9] + mi := &file_control_v2_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -923,7 +922,7 @@ func (x *StateResponse) String() string { func (*StateResponse) ProtoMessage() {} func (x *StateResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[9] + mi := &file_control_v2_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -936,7 +935,7 @@ func (x *StateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StateResponse.ProtoReflect.Descriptor instead. func (*StateResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{9} + return file_control_v2_proto_rawDescGZIP(), []int{9} } func (x *StateResponse) GetInfo() *StateAgentInfo { @@ -990,7 +989,7 @@ type DiagnosticFileResult struct { func (x *DiagnosticFileResult) Reset() { *x = DiagnosticFileResult{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[10] + mi := &file_control_v2_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1003,7 +1002,7 @@ func (x *DiagnosticFileResult) String() string { func (*DiagnosticFileResult) ProtoMessage() {} func (x *DiagnosticFileResult) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[10] + mi := &file_control_v2_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1016,7 +1015,7 @@ func (x *DiagnosticFileResult) ProtoReflect() protoreflect.Message { // Deprecated: Use DiagnosticFileResult.ProtoReflect.Descriptor instead. func (*DiagnosticFileResult) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{10} + return file_control_v2_proto_rawDescGZIP(), []int{10} } func (x *DiagnosticFileResult) GetName() string { @@ -1071,7 +1070,7 @@ type DiagnosticAgentRequest struct { func (x *DiagnosticAgentRequest) Reset() { *x = DiagnosticAgentRequest{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[11] + mi := &file_control_v2_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1084,7 +1083,7 @@ func (x *DiagnosticAgentRequest) String() string { func (*DiagnosticAgentRequest) ProtoMessage() {} func (x *DiagnosticAgentRequest) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[11] + mi := &file_control_v2_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1097,7 +1096,7 @@ func (x *DiagnosticAgentRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DiagnosticAgentRequest.ProtoReflect.Descriptor instead. 
func (*DiagnosticAgentRequest) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{11} + return file_control_v2_proto_rawDescGZIP(), []int{11} } // DiagnosticAgentResponse is response to gathered diagnostic information about the Elastic Agent. @@ -1113,7 +1112,7 @@ type DiagnosticAgentResponse struct { func (x *DiagnosticAgentResponse) Reset() { *x = DiagnosticAgentResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[12] + mi := &file_control_v2_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1126,7 +1125,7 @@ func (x *DiagnosticAgentResponse) String() string { func (*DiagnosticAgentResponse) ProtoMessage() {} func (x *DiagnosticAgentResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[12] + mi := &file_control_v2_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1139,7 +1138,7 @@ func (x *DiagnosticAgentResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DiagnosticAgentResponse.ProtoReflect.Descriptor instead. func (*DiagnosticAgentResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{12} + return file_control_v2_proto_rawDescGZIP(), []int{12} } func (x *DiagnosticAgentResponse) GetResults() []*DiagnosticFileResult { @@ -1166,7 +1165,7 @@ type DiagnosticUnitRequest struct { func (x *DiagnosticUnitRequest) Reset() { *x = DiagnosticUnitRequest{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[13] + mi := &file_control_v2_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1179,7 +1178,7 @@ func (x *DiagnosticUnitRequest) String() string { func (*DiagnosticUnitRequest) ProtoMessage() {} func (x *DiagnosticUnitRequest) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[13] + mi := &file_control_v2_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1192,7 +1191,7 @@ func (x *DiagnosticUnitRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DiagnosticUnitRequest.ProtoReflect.Descriptor instead. func (*DiagnosticUnitRequest) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{13} + return file_control_v2_proto_rawDescGZIP(), []int{13} } func (x *DiagnosticUnitRequest) GetComponentId() string { @@ -1229,7 +1228,7 @@ type DiagnosticUnitsRequest struct { func (x *DiagnosticUnitsRequest) Reset() { *x = DiagnosticUnitsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[14] + mi := &file_control_v2_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1242,7 +1241,7 @@ func (x *DiagnosticUnitsRequest) String() string { func (*DiagnosticUnitsRequest) ProtoMessage() {} func (x *DiagnosticUnitsRequest) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[14] + mi := &file_control_v2_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1255,7 +1254,7 @@ func (x *DiagnosticUnitsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DiagnosticUnitsRequest.ProtoReflect.Descriptor instead. 
func (*DiagnosticUnitsRequest) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{14} + return file_control_v2_proto_rawDescGZIP(), []int{14} } func (x *DiagnosticUnitsRequest) GetUnits() []*DiagnosticUnitRequest { @@ -1286,7 +1285,7 @@ type DiagnosticUnitResponse struct { func (x *DiagnosticUnitResponse) Reset() { *x = DiagnosticUnitResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[15] + mi := &file_control_v2_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1299,7 +1298,7 @@ func (x *DiagnosticUnitResponse) String() string { func (*DiagnosticUnitResponse) ProtoMessage() {} func (x *DiagnosticUnitResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[15] + mi := &file_control_v2_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1312,7 +1311,7 @@ func (x *DiagnosticUnitResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DiagnosticUnitResponse.ProtoReflect.Descriptor instead. func (*DiagnosticUnitResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{15} + return file_control_v2_proto_rawDescGZIP(), []int{15} } func (x *DiagnosticUnitResponse) GetComponentId() string { @@ -1363,7 +1362,7 @@ type DiagnosticUnitsResponse struct { func (x *DiagnosticUnitsResponse) Reset() { *x = DiagnosticUnitsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[16] + mi := &file_control_v2_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1376,7 +1375,7 @@ func (x *DiagnosticUnitsResponse) String() string { func (*DiagnosticUnitsResponse) ProtoMessage() {} func (x *DiagnosticUnitsResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[16] + mi := &file_control_v2_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1389,7 +1388,7 @@ func (x *DiagnosticUnitsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DiagnosticUnitsResponse.ProtoReflect.Descriptor instead. 
func (*DiagnosticUnitsResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{16} + return file_control_v2_proto_rawDescGZIP(), []int{16} } func (x *DiagnosticUnitsResponse) GetUnits() []*DiagnosticUnitResponse { @@ -1399,216 +1398,216 @@ func (x *DiagnosticUnitsResponse) GetUnits() []*DiagnosticUnitResponse { return nil } -var File_control_proto protoreflect.FileDescriptor - -var file_control_proto_rawDesc = []byte{ - 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x06, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x22, 0x7d, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, - 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, - 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x22, 0x55, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x48, 0x0a, 0x0e, 0x55, 0x70, 0x67, 0x72, 0x61, - 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x49, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, - 0x49, 0x22, 0x6f, 0x0a, 0x0f, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x22, 0xb5, 0x01, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, - 0x55, 0x6e, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, - 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, - 0x64, 0x12, 0x23, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xb9, 0x01, 0x0a, 0x14, 0x43, - 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x3a, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, - 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, - 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe6, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, - 0x6e, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x05, - 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x55, 0x6e, - 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x3f, - 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, - 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, - 0x8c, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 
0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0xb2, - 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2a, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x41, 0x67, 0x65, - 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x0a, 0x63, - 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x73, 0x22, 0xdf, 0x01, 0x0a, 0x14, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, - 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x65, 0x64, 0x22, 0x18, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x51, 0x0a, 0x17, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, - 0x6e, 0x74, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, - 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x15, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, - 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, - 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, - 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, 0x22, 0x4d, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x33, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, - 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x4f, 0x0a, 0x17, 0x44, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, +var File_control_v2_proto protoreflect.FileDescriptor + +var file_control_v2_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x06, 0x63, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x7d, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x22, 0x55, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x48, 0x0a, 0x0e, 0x55, 0x70, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x55, 0x52, 0x49, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x55, 0x52, 0x49, 0x22, 0x6f, 0x0a, 0x0f, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xb5, 0x01, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x55, 0x6e, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, 0x09, + 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, + 0x6e, 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, + 0x69, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 
0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xb9, 0x01, + 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, + 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe6, 0x01, 0x0a, 0x0e, 0x43, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x23, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x30, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x55, 0x6e, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, + 0x73, 0x12, 0x3f, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x66, 0x6f, 0x22, 0x8c, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x41, 0x67, 0x65, 0x6e, + 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 
0x75, 0x69, 0x6c, + 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x22, 0xb2, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, + 0x23, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, + 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, + 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xdf, 0x01, 0x0a, 0x14, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x38, + 0x0a, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x22, 0x18, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0x51, 0x0a, 0x17, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, + 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x15, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x21, 
0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, + 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, 0x22, 0x4d, 0x0a, 0x16, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, 0x6e, + 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, 0x12, + 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x4f, 0x0a, + 0x17, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x2a, 0x85, + 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, + 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, + 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, + 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, + 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, + 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, + 0x53, 0x54, 0x4f, 0x50, 0x50, 
0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, 0x47, + 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, + 0x42, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x2a, 0x21, 0x0a, 0x08, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x28, 0x0a, 0x0c, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, + 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, + 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, 0x00, 0x12, 0x09, + 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4d, 0x44, + 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, 0x4f, 0x55, 0x54, + 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x50, 0x10, 0x04, 0x12, + 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, + 0x4f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, 0x52, 0x45, 0x41, + 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, + 0x43, 0x45, 0x10, 0x08, 0x32, 0x8f, 0x03, 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x07, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2d, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, + 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x63, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, + 0x0f, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, + 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x53, 0x0a, 0x0f, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, + 0x6e, 0x69, 0x74, 0x73, 0x12, 0x1e, 0x2e, 0x63, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x2a, 0x85, 0x01, 0x0a, 0x05, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, - 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, - 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, - 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, - 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x4f, - 0x50, 0x50, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, - 0x49, 0x4e, 0x47, 0x10, 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, - 0x4b, 0x10, 0x08, 0x2a, 0x21, 0x0a, 0x08, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x55, - 0x54, 0x50, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x28, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, - 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, - 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, - 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, - 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, - 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, - 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, - 0x4c, 0x45, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, - 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, - 0x08, 0x32, 0x8e, 0x03, 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, - 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x52, - 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, - 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x12, 0x3a, - 0x0a, 0x07, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, - 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0f, 0x44, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x1e, 0x2e, - 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, - 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, - 0x0a, 0x0f, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, - 0x73, 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x42, 0x26, 0x5a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, - 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x2f, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x29, 0x5a, 0x24, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0xf8, 0x01, + 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - file_control_proto_rawDescOnce sync.Once - file_control_proto_rawDescData = file_control_proto_rawDesc + file_control_v2_proto_rawDescOnce sync.Once + file_control_v2_proto_rawDescData = file_control_v2_proto_rawDesc ) -func file_control_proto_rawDescGZIP() []byte { - file_control_proto_rawDescOnce.Do(func() { - file_control_proto_rawDescData = protoimpl.X.CompressGZIP(file_control_proto_rawDescData) +func file_control_v2_proto_rawDescGZIP() []byte { + file_control_v2_proto_rawDescOnce.Do(func() { + file_control_v2_proto_rawDescData = protoimpl.X.CompressGZIP(file_control_v2_proto_rawDescData) }) - return file_control_proto_rawDescData + return file_control_v2_proto_rawDescData } -var file_control_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_control_proto_msgTypes = make([]protoimpl.MessageInfo, 18) -var file_control_proto_goTypes = []interface{}{ +var file_control_v2_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_control_v2_proto_msgTypes = make([]protoimpl.MessageInfo, 18) +var file_control_v2_proto_goTypes = []interface{}{ (State)(0), // 0: cproto.State (UnitType)(0), // 1: cproto.UnitType (ActionStatus)(0), // 2: cproto.ActionStatus @@ -1633,7 +1632,7 @@ var file_control_proto_goTypes = []interface{}{ nil, // 21: cproto.ComponentVersionInfo.MetaEntry (*timestamppb.Timestamp)(nil), // 22: google.protobuf.Timestamp } -var file_control_proto_depIdxs = []int32{ +var file_control_v2_proto_depIdxs = []int32{ 2, // 0: 
cproto.RestartResponse.status:type_name -> cproto.ActionStatus 2, // 1: cproto.UpgradeResponse.status:type_name -> cproto.ActionStatus 1, // 2: cproto.ComponentUnitState.unit_type:type_name -> cproto.UnitType @@ -1663,7 +1662,7 @@ var file_control_proto_depIdxs = []int32{ 6, // 26: cproto.ElasticAgentControl.Restart:output_type -> cproto.RestartResponse 8, // 27: cproto.ElasticAgentControl.Upgrade:output_type -> cproto.UpgradeResponse 16, // 28: cproto.ElasticAgentControl.DiagnosticAgent:output_type -> cproto.DiagnosticAgentResponse - 20, // 29: cproto.ElasticAgentControl.DiagnosticUnits:output_type -> cproto.DiagnosticUnitsResponse + 19, // 29: cproto.ElasticAgentControl.DiagnosticUnits:output_type -> cproto.DiagnosticUnitResponse 24, // [24:30] is the sub-list for method output_type 18, // [18:24] is the sub-list for method input_type 18, // [18:18] is the sub-list for extension type_name @@ -1671,13 +1670,13 @@ var file_control_proto_depIdxs = []int32{ 0, // [0:18] is the sub-list for field type_name } -func init() { file_control_proto_init() } -func file_control_proto_init() { - if File_control_proto != nil { +func init() { file_control_v2_proto_init() } +func file_control_v2_proto_init() { + if File_control_v2_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_control_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Empty); i { case 0: return &v.state @@ -1689,7 +1688,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VersionResponse); i { case 0: return &v.state @@ -1701,7 +1700,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RestartResponse); i { case 0: return &v.state @@ -1713,7 +1712,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpgradeRequest); i { case 0: return &v.state @@ -1725,7 +1724,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpgradeResponse); i { case 0: return &v.state @@ -1737,7 +1736,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ComponentUnitState); i { case 0: return &v.state @@ -1749,7 +1748,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ComponentVersionInfo); i { case 0: return &v.state @@ -1761,7 +1760,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[7].Exporter = 
func(v interface{}, i int) interface{} { switch v := v.(*ComponentState); i { case 0: return &v.state @@ -1773,7 +1772,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StateAgentInfo); i { case 0: return &v.state @@ -1785,7 +1784,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StateResponse); i { case 0: return &v.state @@ -1797,7 +1796,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DiagnosticFileResult); i { case 0: return &v.state @@ -1809,7 +1808,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DiagnosticAgentRequest); i { case 0: return &v.state @@ -1821,7 +1820,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DiagnosticAgentResponse); i { case 0: return &v.state @@ -1833,7 +1832,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DiagnosticUnitRequest); i { case 0: return &v.state @@ -1845,7 +1844,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DiagnosticUnitsRequest); i { case 0: return &v.state @@ -1857,7 +1856,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DiagnosticUnitResponse); i { case 0: return &v.state @@ -1869,7 +1868,7 @@ func file_control_proto_init() { return nil } } - file_control_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_control_v2_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DiagnosticUnitsResponse); i { case 0: return &v.state @@ -1886,19 +1885,19 @@ func file_control_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_control_proto_rawDesc, + RawDescriptor: file_control_v2_proto_rawDesc, NumEnums: 4, NumMessages: 18, NumExtensions: 0, NumServices: 1, }, - GoTypes: file_control_proto_goTypes, - DependencyIndexes: file_control_proto_depIdxs, - EnumInfos: file_control_proto_enumTypes, - MessageInfos: file_control_proto_msgTypes, + GoTypes: file_control_v2_proto_goTypes, + DependencyIndexes: file_control_v2_proto_depIdxs, + EnumInfos: file_control_v2_proto_enumTypes, + 
MessageInfos: file_control_v2_proto_msgTypes, }.Build() - File_control_proto = out.File - file_control_proto_rawDesc = nil - file_control_proto_goTypes = nil - file_control_proto_depIdxs = nil + File_control_v2_proto = out.File + file_control_v2_proto_rawDesc = nil + file_control_v2_proto_goTypes = nil + file_control_v2_proto_depIdxs = nil } diff --git a/internal/pkg/agent/control/cproto/control_grpc.pb.go b/internal/pkg/agent/control/v2/cproto/control_v2_grpc.pb.go similarity index 83% rename from internal/pkg/agent/control/cproto/control_grpc.pb.go rename to internal/pkg/agent/control/v2/cproto/control_v2_grpc.pb.go index f00afb24d2b..f7c377c84eb 100644 --- a/internal/pkg/agent/control/cproto/control_grpc.pb.go +++ b/internal/pkg/agent/control/v2/cproto/control_v2_grpc.pb.go @@ -1,18 +1,13 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.5 -// source: control.proto +// - protoc v3.21.9 +// source: control_v2.proto package cproto import ( context "context" - grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" @@ -38,7 +33,7 @@ type ElasticAgentControlClient interface { // Gather diagnostic information for the running Elastic Agent. DiagnosticAgent(ctx context.Context, in *DiagnosticAgentRequest, opts ...grpc.CallOption) (*DiagnosticAgentResponse, error) // Gather diagnostic information for the running units. - DiagnosticUnits(ctx context.Context, in *DiagnosticUnitsRequest, opts ...grpc.CallOption) (*DiagnosticUnitsResponse, error) + DiagnosticUnits(ctx context.Context, in *DiagnosticUnitsRequest, opts ...grpc.CallOption) (ElasticAgentControl_DiagnosticUnitsClient, error) } type elasticAgentControlClient struct { @@ -94,13 +89,36 @@ func (c *elasticAgentControlClient) DiagnosticAgent(ctx context.Context, in *Dia return out, nil } -func (c *elasticAgentControlClient) DiagnosticUnits(ctx context.Context, in *DiagnosticUnitsRequest, opts ...grpc.CallOption) (*DiagnosticUnitsResponse, error) { - out := new(DiagnosticUnitsResponse) - err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/DiagnosticUnits", in, out, opts...) +func (c *elasticAgentControlClient) DiagnosticUnits(ctx context.Context, in *DiagnosticUnitsRequest, opts ...grpc.CallOption) (ElasticAgentControl_DiagnosticUnitsClient, error) { + stream, err := c.cc.NewStream(ctx, &ElasticAgentControl_ServiceDesc.Streams[0], "/cproto.ElasticAgentControl/DiagnosticUnits", opts...) if err != nil { return nil, err } - return out, nil + x := &elasticAgentControlDiagnosticUnitsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type ElasticAgentControl_DiagnosticUnitsClient interface { + Recv() (*DiagnosticUnitResponse, error) + grpc.ClientStream +} + +type elasticAgentControlDiagnosticUnitsClient struct { + grpc.ClientStream +} + +func (x *elasticAgentControlDiagnosticUnitsClient) Recv() (*DiagnosticUnitResponse, error) { + m := new(DiagnosticUnitResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil } // ElasticAgentControlServer is the server API for ElasticAgentControl service. 
@@ -118,7 +136,7 @@ type ElasticAgentControlServer interface { // Gather diagnostic information for the running Elastic Agent. DiagnosticAgent(context.Context, *DiagnosticAgentRequest) (*DiagnosticAgentResponse, error) // Gather diagnostic information for the running units. - DiagnosticUnits(context.Context, *DiagnosticUnitsRequest) (*DiagnosticUnitsResponse, error) + DiagnosticUnits(*DiagnosticUnitsRequest, ElasticAgentControl_DiagnosticUnitsServer) error mustEmbedUnimplementedElasticAgentControlServer() } @@ -141,8 +159,8 @@ func (UnimplementedElasticAgentControlServer) Upgrade(context.Context, *UpgradeR func (UnimplementedElasticAgentControlServer) DiagnosticAgent(context.Context, *DiagnosticAgentRequest) (*DiagnosticAgentResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method DiagnosticAgent not implemented") } -func (UnimplementedElasticAgentControlServer) DiagnosticUnits(context.Context, *DiagnosticUnitsRequest) (*DiagnosticUnitsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DiagnosticUnits not implemented") +func (UnimplementedElasticAgentControlServer) DiagnosticUnits(*DiagnosticUnitsRequest, ElasticAgentControl_DiagnosticUnitsServer) error { + return status.Errorf(codes.Unimplemented, "method DiagnosticUnits not implemented") } func (UnimplementedElasticAgentControlServer) mustEmbedUnimplementedElasticAgentControlServer() {} @@ -247,22 +265,25 @@ func _ElasticAgentControl_DiagnosticAgent_Handler(srv interface{}, ctx context.C return interceptor(ctx, in, info, handler) } -func _ElasticAgentControl_DiagnosticUnits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DiagnosticUnitsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElasticAgentControlServer).DiagnosticUnits(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cproto.ElasticAgentControl/DiagnosticUnits", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElasticAgentControlServer).DiagnosticUnits(ctx, req.(*DiagnosticUnitsRequest)) +func _ElasticAgentControl_DiagnosticUnits_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(DiagnosticUnitsRequest) + if err := stream.RecvMsg(m); err != nil { + return err } - return interceptor(ctx, in, info, handler) + return srv.(ElasticAgentControlServer).DiagnosticUnits(m, &elasticAgentControlDiagnosticUnitsServer{stream}) +} + +type ElasticAgentControl_DiagnosticUnitsServer interface { + Send(*DiagnosticUnitResponse) error + grpc.ServerStream +} + +type elasticAgentControlDiagnosticUnitsServer struct { + grpc.ServerStream +} + +func (x *elasticAgentControlDiagnosticUnitsServer) Send(m *DiagnosticUnitResponse) error { + return x.ServerStream.SendMsg(m) } // ElasticAgentControl_ServiceDesc is the grpc.ServiceDesc for ElasticAgentControl service. 
@@ -292,11 +313,13 @@ var ElasticAgentControl_ServiceDesc = grpc.ServiceDesc{ MethodName: "DiagnosticAgent", Handler: _ElasticAgentControl_DiagnosticAgent_Handler, }, + }, + Streams: []grpc.StreamDesc{ { - MethodName: "DiagnosticUnits", - Handler: _ElasticAgentControl_DiagnosticUnits_Handler, + StreamName: "DiagnosticUnits", + Handler: _ElasticAgentControl_DiagnosticUnits_Handler, + ServerStreams: true, }, }, - Streams: []grpc.StreamDesc{}, - Metadata: "control.proto", + Metadata: "control_v2.proto", } diff --git a/internal/pkg/agent/control/server/listener.go b/internal/pkg/agent/control/v2/server/listener.go similarity index 100% rename from internal/pkg/agent/control/server/listener.go rename to internal/pkg/agent/control/v2/server/listener.go diff --git a/internal/pkg/agent/control/server/listener_windows.go b/internal/pkg/agent/control/v2/server/listener_windows.go similarity index 100% rename from internal/pkg/agent/control/server/listener_windows.go rename to internal/pkg/agent/control/v2/server/listener_windows.go diff --git a/internal/pkg/agent/control/server/server.go b/internal/pkg/agent/control/v2/server/server.go similarity index 84% rename from internal/pkg/agent/control/server/server.go rename to internal/pkg/agent/control/v2/server/server.go index 67fe85fab2b..057761f686e 100644 --- a/internal/pkg/agent/control/server/server.go +++ b/internal/pkg/agent/control/v2/server/server.go @@ -21,8 +21,11 @@ import ( "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/control" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v1/proto" + v1server "github.com/elastic/elastic-agent/internal/pkg/agent/control/v1/server" + cproto "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/cproto" "github.com/elastic/elastic-agent/internal/pkg/diagnostics" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/component" @@ -33,23 +36,25 @@ import ( type Server struct { cproto.UnimplementedElasticAgentControlServer - logger *logger.Logger - agentInfo *info.AgentInfo - coord *coordinator.Coordinator - listener net.Listener - server *grpc.Server - tracer *apm.Tracer - diagHooks diagnostics.Hooks + logger *logger.Logger + agentInfo *info.AgentInfo + coord *coordinator.Coordinator + listener net.Listener + server *grpc.Server + tracer *apm.Tracer + diagHooks diagnostics.Hooks + grpcConfig *configuration.GRPCConfig } // New creates a new control protocol server. 
-func New(log *logger.Logger, agentInfo *info.AgentInfo, coord *coordinator.Coordinator, tracer *apm.Tracer, diagHooks diagnostics.Hooks) *Server { +func New(log *logger.Logger, agentInfo *info.AgentInfo, coord *coordinator.Coordinator, tracer *apm.Tracer, diagHooks diagnostics.Hooks, grpcConfig *configuration.GRPCConfig) *Server { return &Server{ - logger: log, - agentInfo: agentInfo, - coord: coord, - tracer: tracer, - diagHooks: diagHooks, + logger: log, + agentInfo: agentInfo, + coord: coord, + tracer: tracer, + diagHooks: diagHooks, + grpcConfig: grpcConfig, } } @@ -68,12 +73,15 @@ func (s *Server) Start() error { s.listener = lis if s.tracer != nil { apmInterceptor := apmgrpc.NewUnaryServerInterceptor(apmgrpc.WithRecovery(), apmgrpc.WithTracer(s.tracer)) - s.server = grpc.NewServer(grpc.UnaryInterceptor(apmInterceptor)) + s.server = grpc.NewServer(grpc.UnaryInterceptor(apmInterceptor), grpc.MaxRecvMsgSize(s.grpcConfig.MaxMsgSize)) } else { - s.server = grpc.NewServer() + s.server = grpc.NewServer(grpc.MaxRecvMsgSize(s.grpcConfig.MaxMsgSize)) } cproto.RegisterElasticAgentControlServer(s.server, s) + v1Wrapper := v1server.New(s.logger, s, s.tracer) + proto.RegisterElasticAgentControlServer(s.server, v1Wrapper) + // start serving GRPC connections go func() { err := s.server.Serve(lis) @@ -203,7 +211,7 @@ func (s *Server) DiagnosticAgent(ctx context.Context, _ *cproto.DiagnosticAgentR } // DiagnosticUnits returns diagnostic information for the specific units (or all units if non-provided). -func (s *Server) DiagnosticUnits(ctx context.Context, req *cproto.DiagnosticUnitsRequest) (*cproto.DiagnosticUnitsResponse, error) { +func (s *Server) DiagnosticUnits(req *cproto.DiagnosticUnitsRequest, srv cproto.ElasticAgentControl_DiagnosticUnitsServer) error { reqs := make([]runtime.ComponentUnitDiagnosticRequest, 0, len(req.Units)) for _, u := range req.Units { reqs = append(reqs, runtime.ComponentUnitDiagnosticRequest{ @@ -217,8 +225,7 @@ func (s *Server) DiagnosticUnits(ctx context.Context, req *cproto.DiagnosticUnit }) } - diag := s.coord.PerformDiagnostics(ctx, reqs...) - res := make([]*cproto.DiagnosticUnitResponse, 0, len(diag)) + diag := s.coord.PerformDiagnostics(srv.Context(), reqs...) 
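// Editor's sketch (not part of the PR diff): New now requires a *configuration.GRPCConfig so
// that Start can cap inbound message size with grpc.MaxRecvMsgSize, and Start additionally
// registers the v1 compatibility server next to the v2 one. All arguments here are
// placeholders; configuration.DefaultGRPCConfig mirrors the updated tests later in this diff.
func startControlServer(log *logger.Logger, agentInfo *info.AgentInfo, coord *coordinator.Coordinator, hooks diagnostics.Hooks) (*server.Server, error) {
	srv := server.New(log, agentInfo, coord, nil, hooks, configuration.DefaultGRPCConfig())
	if err := srv.Start(); err != nil {
		return nil, err
	}
	// callers should defer srv.Stop() once they are done with the control server
	return srv, nil
}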
for _, d := range diag { r := &cproto.DiagnosticUnitResponse{ ComponentId: d.Component.ID, @@ -243,7 +250,11 @@ func (s *Server) DiagnosticUnits(ctx context.Context, req *cproto.DiagnosticUnit } r.Results = results } - res = append(res, r) + + if err := srv.Send(r); err != nil { + return err + } } - return &cproto.DiagnosticUnitsResponse{Units: res}, nil + + return nil } diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index 9e3eb56e5cd..15e43cca9d1 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -167,7 +167,7 @@ func serviceComponentsFromConfig(specs component.RuntimeSpecs, cfg *config.Confi if err != nil { return nil, errors.New("failed to create a map from config", err) } - allComps, err := specs.ToComponents(mm, nil) + allComps, err := specs.ToComponents(mm, nil, logp.InfoLevel) if err != nil { return nil, fmt.Errorf("failed to render components: %w", err) } diff --git a/internal/pkg/agent/transpiler/ast_test.go b/internal/pkg/agent/transpiler/ast_test.go index 1e52d6c6f47..f90959a95b6 100644 --- a/internal/pkg/agent/transpiler/ast_test.go +++ b/internal/pkg/agent/transpiler/ast_test.go @@ -1840,7 +1840,7 @@ func TestLookupString(t *testing.T) { } func mustMakeVars(mapping map[string]interface{}) *Vars { - v, err := NewVars(mapping, nil) + v, err := NewVars("", mapping, nil) if err != nil { panic(err) } diff --git a/internal/pkg/agent/transpiler/utils.go b/internal/pkg/agent/transpiler/utils.go index 9090d62b262..4602e760b14 100644 --- a/internal/pkg/agent/transpiler/utils.go +++ b/internal/pkg/agent/transpiler/utils.go @@ -9,13 +9,20 @@ import ( "fmt" ) +const ( + // streamsKey is the name of the dictionary key for streams that an input can have. In the case that + // an input defines a set of streams and after conditions are applied all the streams are removed then + // the entire input is removed. 
+ streamsKey = "streams" +) + // RenderInputs renders dynamic inputs section func RenderInputs(inputs Node, varsArray []*Vars) (Node, error) { l, ok := inputs.Value().(*List) if !ok { return nil, fmt.Errorf("inputs must be an array") } - nodes := []*Dict{} + var nodes []varIDMap nodesMap := map[string]*Dict{} for _, vars := range varsArray { for _, node := range l.Value().([]Node) { @@ -23,6 +30,10 @@ func RenderInputs(inputs Node, varsArray []*Vars) (Node, error) { if !ok { continue } + hadStreams := false + if streams := getStreams(dict); streams != nil { + hadStreams = true + } n, err := dict.Apply(vars) if errors.Is(err, ErrNoMatch) { // has a variable that didn't exist, so we ignore it @@ -37,21 +48,84 @@ func RenderInputs(inputs Node, varsArray []*Vars) (Node, error) { continue } dict = n.(*Dict) + if hadStreams { + streams := getStreams(dict) + if streams == nil { + // conditions removed all streams (input is removed) + continue + } + } hash := string(dict.Hash()) _, exists := nodesMap[hash] if !exists { nodesMap[hash] = dict - nodes = append(nodes, dict) + nodes = append(nodes, varIDMap{vars.ID(), dict}) } } } - nInputs := []Node{} + var nInputs []Node for _, node := range nodes { - nInputs = append(nInputs, promoteProcessors(node)) + if node.id != "" { + // vars has unique ID, concat ID onto existing ID + idNode, ok := node.d.Find("id") + if ok { + idKey, _ := idNode.(*Key) // always a Key + + // clone original and update its key to 'original_id' + origKey, _ := idKey.Clone().(*Key) // always a Key + origKey.name = "original_id" + node.d.Insert(origKey) + + // update id field to concat the id of the variable context set + switch idVal := idKey.value.(type) { + case *StrVal: + idVal.value = fmt.Sprintf("%s-%s", idVal.value, node.id) + case *IntVal: + idKey.value = NewStrVal(fmt.Sprintf("%d-%s", idVal.value, node.id)) + case *UIntVal: + idKey.value = NewStrVal(fmt.Sprintf("%d-%s", idVal.value, node.id)) + case *FloatVal: + idKey.value = NewStrVal(fmt.Sprintf("%f-%s", idVal.value, node.id)) + default: + return nil, fmt.Errorf("id field type invalid, expected string, int, uint, or float got: %T", idKey.value) + } + } else { + node.d.Insert(NewKey("id", NewStrVal(node.id))) + } + } + nInputs = append(nInputs, promoteProcessors(node.d)) } return NewList(nInputs), nil } +type varIDMap struct { + id string + d *Dict +} + +func getStreams(dict *Dict) *List { + node, ok := dict.Find(streamsKey) + if !ok { + return nil + } + key, ok := node.(*Key) + if !ok { + return nil + } + if key.value == nil { + return nil + } + list, ok := key.value.(*List) + if !ok { + return nil + } + if len(list.value) == 0 { + // didn't have any streams defined in the list (so no removal should be done) + return nil + } + return list +} + func promoteProcessors(dict *Dict) *Dict { p := dict.Processors() if p == nil { diff --git a/internal/pkg/agent/transpiler/utils_test.go b/internal/pkg/agent/transpiler/utils_test.go index 0de58a56d73..9c359167199 100644 --- a/internal/pkg/agent/transpiler/utils_test.go +++ b/internal/pkg/agent/transpiler/utils_test.go @@ -363,6 +363,7 @@ func TestRenderInputs(t *testing.T) { "vars with processors": { input: NewKey("inputs", NewList([]Node{ NewDict([]Node{ + NewKey("id", NewStrVal("initial")), NewKey("type", NewStrVal("logfile")), NewKey("streams", NewList([]Node{ NewDict([]Node{ @@ -385,6 +386,7 @@ func TestRenderInputs(t *testing.T) { })), expected: NewList([]Node{ NewDict([]Node{ + NewKey("id", NewStrVal("initial-value1")), NewKey("type", NewStrVal("logfile")), 
NewKey("streams", NewList([]Node{ NewDict([]Node{ @@ -411,8 +413,10 @@ func TestRenderInputs(t *testing.T) { })), }), })), + NewKey("original_id", NewStrVal("initial")), }), NewDict([]Node{ + NewKey("id", NewStrVal("initial-value2")), NewKey("type", NewStrVal("logfile")), NewKey("streams", NewList([]Node{ NewDict([]Node{ @@ -439,10 +443,11 @@ func TestRenderInputs(t *testing.T) { })), }), })), + NewKey("original_id", NewStrVal("initial")), }), }), varsArray: []*Vars{ - mustMakeVarsP(map[string]interface{}{ + mustMakeVarsP("value1", map[string]interface{}{ "var1": map[string]interface{}{ "name": "value1", }, @@ -458,7 +463,7 @@ func TestRenderInputs(t *testing.T) { }, }, }), - mustMakeVarsP(map[string]interface{}{ + mustMakeVarsP("value2", map[string]interface{}{ "var1": map[string]interface{}{ "name": "value2", }, @@ -499,6 +504,7 @@ func TestRenderInputs(t *testing.T) { })), }), })), + NewKey("id", NewStrVal("value1")), NewKey("processors", NewList([]Node{ NewDict([]Node{ NewKey("add_fields", NewDict([]Node{ @@ -519,6 +525,7 @@ func TestRenderInputs(t *testing.T) { })), }), })), + NewKey("id", NewStrVal("value2")), NewKey("processors", NewList([]Node{ NewDict([]Node{ NewKey("add_fields", NewDict([]Node{ @@ -532,7 +539,7 @@ func TestRenderInputs(t *testing.T) { }), }), varsArray: []*Vars{ - mustMakeVarsP(map[string]interface{}{ + mustMakeVarsP("value1", map[string]interface{}{ "var1": map[string]interface{}{ "name": "value1", }, @@ -548,7 +555,7 @@ func TestRenderInputs(t *testing.T) { }, }, }), - mustMakeVarsP(map[string]interface{}{ + mustMakeVarsP("value2", map[string]interface{}{ "var1": map[string]interface{}{ "name": "value2", }, @@ -599,6 +606,7 @@ func TestRenderInputs(t *testing.T) { NewKey("invalid", NewStrVal("value")), })), })), + NewKey("id", NewStrVal("value1")), }), NewDict([]Node{ NewKey("type", NewStrVal("logfile")), @@ -614,10 +622,11 @@ func TestRenderInputs(t *testing.T) { NewKey("invalid", NewStrVal("value")), })), })), + NewKey("id", NewStrVal("value2")), }), }), varsArray: []*Vars{ - mustMakeVarsP(map[string]interface{}{ + mustMakeVarsP("value1", map[string]interface{}{ "var1": map[string]interface{}{ "name": "value1", }, @@ -633,7 +642,7 @@ func TestRenderInputs(t *testing.T) { }, }, }), - mustMakeVarsP(map[string]interface{}{ + mustMakeVarsP("value2", map[string]interface{}{ "var1": map[string]interface{}{ "name": "value2", }, @@ -674,6 +683,7 @@ func TestRenderInputs(t *testing.T) { })), }), })), + NewKey("id", NewStrVal("value1")), NewKey("processors", NewList([]Node{ NewDict([]Node{ NewKey("add_fields", NewDict([]Node{ @@ -687,7 +697,7 @@ func TestRenderInputs(t *testing.T) { }), }), varsArray: []*Vars{ - mustMakeVarsP(map[string]interface{}{ + mustMakeVarsP("value1", map[string]interface{}{ "var1": map[string]interface{}{ "name": "value1", }, @@ -703,7 +713,7 @@ func TestRenderInputs(t *testing.T) { }, }, }), - mustMakeVarsP(map[string]interface{}{ + mustMakeVarsP("value2", map[string]interface{}{ "var1": map[string]interface{}{ "name": "value1", }, @@ -721,6 +731,44 @@ func TestRenderInputs(t *testing.T) { }), }, }, + "input removal with stream conditions": { + input: NewKey("inputs", NewList([]Node{ + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/${var1.name}.log"), + })), + NewKey("condition", NewStrVal("${var1.name} != 'value1'")), + }), + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/${var1.name}.log"), + })), + 
NewKey("condition", NewStrVal("${var1.name} != 'value1'")), + }), + })), + }), + })), + expected: NewList([]Node{}), + varsArray: []*Vars{ + mustMakeVarsP("value1", map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + }, + }, + "var1", + nil), + mustMakeVarsP("value2", map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + }, + }, + "var1", + nil), + }, + }, } for name, test := range testcases { @@ -736,8 +784,8 @@ func TestRenderInputs(t *testing.T) { } } -func mustMakeVarsP(mapping map[string]interface{}, processorKey string, processors Processors) *Vars { - v, err := NewVarsWithProcessors(mapping, processorKey, processors, nil) +func mustMakeVarsP(id string, mapping map[string]interface{}, processorKey string, processors Processors) *Vars { + v, err := NewVarsWithProcessors(id, mapping, processorKey, processors, nil) if err != nil { panic(err) } diff --git a/internal/pkg/agent/transpiler/vars.go b/internal/pkg/agent/transpiler/vars.go index 96fbacd48c8..be8aa074095 100644 --- a/internal/pkg/agent/transpiler/vars.go +++ b/internal/pkg/agent/transpiler/vars.go @@ -21,6 +21,7 @@ var ErrNoMatch = fmt.Errorf("no matching vars") // Vars is a context of variables that also contain a list of processors that go with the mapping. type Vars struct { + id string tree *AST processorsKey string processors Processors @@ -28,17 +29,17 @@ type Vars struct { } // NewVars returns a new instance of vars. -func NewVars(mapping map[string]interface{}, fetchContextProviders mapstr.M) (*Vars, error) { - return NewVarsWithProcessors(mapping, "", nil, fetchContextProviders) +func NewVars(id string, mapping map[string]interface{}, fetchContextProviders mapstr.M) (*Vars, error) { + return NewVarsWithProcessors(id, mapping, "", nil, fetchContextProviders) } // NewVarsWithProcessors returns a new instance of vars with attachment of processors. -func NewVarsWithProcessors(mapping map[string]interface{}, processorKey string, processors Processors, fetchContextProviders mapstr.M) (*Vars, error) { +func NewVarsWithProcessors(id string, mapping map[string]interface{}, processorKey string, processors Processors, fetchContextProviders mapstr.M) (*Vars, error) { tree, err := NewAST(mapping) if err != nil { return nil, err } - return &Vars{tree, processorKey, processors, fetchContextProviders}, nil + return &Vars{id, tree, processorKey, processors, fetchContextProviders}, nil } // Replace returns a new value based on variable replacement. @@ -91,6 +92,11 @@ func (v *Vars) Replace(value string) (Node, error) { return NewStrValWithProcessors(result+value[lastIndex:], processors), nil } +// ID returns the unique ID for the vars. +func (v *Vars) ID() string { + return v.id +} + // Lookup returns the value from the vars. 
func (v *Vars) Lookup(name string) (interface{}, bool) { // lookup in the AST tree diff --git a/internal/pkg/agent/transpiler/vars_test.go b/internal/pkg/agent/transpiler/vars_test.go index 56e27694a33..76a1bbfd9d2 100644 --- a/internal/pkg/agent/transpiler/vars_test.go +++ b/internal/pkg/agent/transpiler/vars_test.go @@ -227,6 +227,7 @@ func TestVars_ReplaceWithProcessors(t *testing.T) { }, } vars, err := NewVarsWithProcessors( + "", map[string]interface{}{ "testing": map[string]interface{}{ "key1": "data1", @@ -293,6 +294,7 @@ func TestVars_ReplaceWithFetchContextProvider(t *testing.T) { "kubernetes_secrets": mockFetchProvider, } vars, err := NewVarsWithProcessors( + "id", map[string]interface{}{ "testing": map[string]interface{}{ "key1": "data1", diff --git a/internal/pkg/basecmd/restart/cmd.go b/internal/pkg/basecmd/restart/cmd.go index 0b8e8e9bce1..3b3cef9c6e6 100644 --- a/internal/pkg/basecmd/restart/cmd.go +++ b/internal/pkg/basecmd/restart/cmd.go @@ -10,7 +10,7 @@ import ( "github.com/spf13/cobra" "github.com/elastic/elastic-agent/internal/pkg/agent/control" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/client" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/cli" ) diff --git a/internal/pkg/basecmd/version/cmd.go b/internal/pkg/basecmd/version/cmd.go index 95f018e81a3..a0c8c35ba3a 100644 --- a/internal/pkg/basecmd/version/cmd.go +++ b/internal/pkg/basecmd/version/cmd.go @@ -11,7 +11,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v2" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/client" "github.com/elastic/elastic-agent/internal/pkg/cli" "github.com/elastic/elastic-agent/internal/pkg/release" ) diff --git a/internal/pkg/basecmd/version/cmd_test.go b/internal/pkg/basecmd/version/cmd_test.go index fb0e7e960bd..60d91cf629d 100644 --- a/internal/pkg/basecmd/version/cmd_test.go +++ b/internal/pkg/basecmd/version/cmd_test.go @@ -15,7 +15,8 @@ import ( "gopkg.in/yaml.v2" "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/server" + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/v2/server" "github.com/elastic/elastic-agent/internal/pkg/cli" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -57,7 +58,7 @@ func TestCmdBinaryOnlyYAML(t *testing.T) { } func TestCmdDaemon(t *testing.T) { - srv := server.New(newErrorLogger(t), nil, nil, apmtest.DiscardTracer, nil) + srv := server.New(newErrorLogger(t), nil, nil, apmtest.DiscardTracer, nil, configuration.DefaultGRPCConfig()) require.NoError(t, srv.Start()) defer srv.Stop() @@ -73,7 +74,7 @@ func TestCmdDaemon(t *testing.T) { } func TestCmdDaemonYAML(t *testing.T) { - srv := server.New(newErrorLogger(t), nil, nil, apmtest.DiscardTracer, nil) + srv := server.New(newErrorLogger(t), nil, nil, apmtest.DiscardTracer, nil, configuration.DefaultGRPCConfig()) require.NoError(t, srv.Start()) defer srv.Stop() diff --git a/internal/pkg/composable/controller.go b/internal/pkg/composable/controller.go index 0af5a0d93e8..9fed9c14c7f 100644 --- a/internal/pkg/composable/controller.go +++ b/internal/pkg/composable/controller.go @@ -173,6 +173,7 @@ func (c *controller) Run(ctx context.Context) error { } }() + close(c.ch) 
wg.Wait() return ctx.Err() case <-notify: @@ -193,20 +194,25 @@ func (c *controller) Run(ctx context.Context) error { mapping[name] = state.Current() } // this is ensured not to error, by how the mappings states are verified - vars[0], _ = transpiler.NewVars(mapping, fetchContextProviders) + vars[0], _ = transpiler.NewVars("", mapping, fetchContextProviders) // add to the vars list for each dynamic providers mappings for name, state := range c.dynamicProviders { for _, mappings := range state.Mappings() { local, _ := cloneMap(mapping) // will not fail; already been successfully cloned once local[name] = mappings.mapping + id := fmt.Sprintf("%s-%s", name, mappings.id) // this is ensured not to error, by how the mappings states are verified - v, _ := transpiler.NewVarsWithProcessors(local, name, mappings.processors, fetchContextProviders) + v, _ := transpiler.NewVarsWithProcessors(id, local, name, mappings.processors, fetchContextProviders) vars = append(vars, v) } } - c.ch <- vars + select { + case c.ch <- vars: + case <-ctx.Done(): + // coordinator is handling cancellation it won't drain the channel + } } } @@ -237,7 +243,7 @@ func (c *contextProviderState) Set(mapping map[string]interface{}) error { return err } // ensure creating vars will not error - _, err = transpiler.NewVars(mapping, nil) + _, err = transpiler.NewVars("", mapping, nil) if err != nil { return err } @@ -262,6 +268,7 @@ func (c *contextProviderState) Current() map[string]interface{} { } type dynamicProviderMapping struct { + id string priority int mapping map[string]interface{} processors transpiler.Processors @@ -292,7 +299,7 @@ func (c *dynamicProviderState) AddOrUpdate(id string, priority int, mapping map[ return err } // ensure creating vars will not error - _, err = transpiler.NewVars(mapping, nil) + _, err = transpiler.NewVars("", mapping, nil) if err != nil { return err } @@ -305,6 +312,7 @@ func (c *dynamicProviderState) AddOrUpdate(id string, priority int, mapping map[ return nil } c.mappings[id] = dynamicProviderMapping{ + id: id, priority: priority, mapping: mapping, processors: processors, diff --git a/internal/pkg/composable/controller_test.go b/internal/pkg/composable/controller_test.go index d4fdbb8fdfc..050fe78ed74 100644 --- a/internal/pkg/composable/controller_test.go +++ b/internal/pkg/composable/controller_test.go @@ -7,6 +7,7 @@ package composable_test import ( "context" "errors" + "fmt" "testing" "time" @@ -131,3 +132,85 @@ func TestController(t *testing.T) { require.True(t, ok) assert.Equal(t, "value2", localMap["key1"]) } + +func TestCancellation(t *testing.T) { + cfg, err := config.NewConfigFrom(map[string]interface{}{ + "providers": map[string]interface{}{ + "env": map[string]interface{}{ + "enabled": "false", + }, + "local": map[string]interface{}{ + "vars": map[string]interface{}{ + "key1": "value1", + }, + }, + "local_dynamic": map[string]interface{}{ + "items": []map[string]interface{}{ + { + "vars": map[string]interface{}{ + "key1": "value1", + }, + "processors": []map[string]interface{}{ + { + "add_fields": map[string]interface{}{ + "fields": map[string]interface{}{ + "add": "value1", + }, + "to": "dynamic", + }, + }, + }, + }, + { + "vars": map[string]interface{}{ + "key1": "value2", + }, + "processors": []map[string]interface{}{ + { + "add_fields": map[string]interface{}{ + "fields": map[string]interface{}{ + "add": "value2", + }, + "to": "dynamic", + }, + }, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + + log, err := logger.New("", false) + require.NoError(t, err) + + // 
try with variable deadlines + timeout := 50 * time.Millisecond + for i := 1; i <= 10; i++ { + t.Run(fmt.Sprintf("test run %d", i), func(t *testing.T) { + c, err := composable.New(log, cfg, false) + require.NoError(t, err) + ctx, cancelFn := context.WithTimeout(context.Background(), timeout) + defer cancelFn() + err = c.Run(ctx) + // test will time out and fail if cancellation is not proper + if err != nil { + require.True(t, errors.Is(err, context.DeadlineExceeded)) + } + }) + timeout += 10 * time.Millisecond + } + + t.Run("immediate cancellation", func(t *testing.T) { + c, err := composable.New(log, cfg, false) + require.NoError(t, err) + ctx, cancelFn := context.WithTimeout(context.Background(), 0) + cancelFn() + err = c.Run(ctx) + // test will time out and fail if cancellation is not proper + if err != nil { + require.True(t, errors.Is(err, context.DeadlineExceeded)) + } + }) +} diff --git a/internal/pkg/fleetapi/acker/lazy/lazy_acker.go b/internal/pkg/fleetapi/acker/lazy/lazy_acker.go index c2fd51f9775..298b2b5bf7f 100644 --- a/internal/pkg/fleetapi/acker/lazy/lazy_acker.go +++ b/internal/pkg/fleetapi/acker/lazy/lazy_acker.go @@ -80,18 +80,18 @@ func (f *Acker) Commit(ctx context.Context) (err error) { actions := f.queue f.queue = make([]fleetapi.Action, 0) - f.log.Debugf("lazy acker: ackbatch: %#v", actions) + f.log.Debugf("lazy acker: ack batch: %s", actions) var resp *fleetapi.AckResponse resp, err = f.acker.AckBatch(ctx, actions) // If request failed enqueue all actions with retrier if it is set if err != nil { if f.retrier != nil { - f.log.Errorf("lazy acker: failed ack batch, enqueue for retry: %#v", actions) + f.log.Errorf("lazy acker: failed ack batch, enqueue for retry: %s", actions) f.retrier.Enqueue(actions) return nil } - f.log.Errorf("lazy acker: failed ack batch, no retrier set, fail with err: %v", err) + f.log.Errorf("lazy acker: failed ack batch, no retrier set, fail with err: %s", err) return err } @@ -107,7 +107,7 @@ func (f *Acker) Commit(ctx context.Context) (err error) { } } if len(failed) > 0 { - f.log.Infof("lazy acker: partially failed ack batch, enqueue for retry: %#v", failed) + f.log.Infof("lazy acker: partially failed ack batch, enqueue for retry: %s", failed) f.retrier.Enqueue(failed) } } diff --git a/internal/pkg/fleetapi/acker/lazy/lazy_acker_test.go b/internal/pkg/fleetapi/acker/lazy/lazy_acker_test.go index f0384a07b6d..3766a57fbf4 100644 --- a/internal/pkg/fleetapi/acker/lazy/lazy_acker_test.go +++ b/internal/pkg/fleetapi/acker/lazy/lazy_acker_test.go @@ -12,6 +12,7 @@ import ( "github.com/google/go-cmp/cmp" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -82,7 +83,11 @@ func TestLazyAcker(t *testing.T) { ctx, cn := context.WithCancel(context.Background()) defer cn() - log, _ := logger.New("", false) + cfg := logger.DefaultLoggingConfig() + cfg.Level = logp.DebugLevel + // cfg.ToFiles = false + cfg.ToStderr = true + log, _ := logger.NewFromConfig("", cfg, true) // Tests tests := []struct { diff --git a/internal/pkg/remote/client.go b/internal/pkg/remote/client.go index 5c8fd5c9a34..af77db8d0c2 100644 --- a/internal/pkg/remote/client.go +++ b/internal/pkg/remote/client.go @@ -174,6 +174,7 @@ func (c *Client) Send( "fail to create HTTP request using method %s to %s: %w", method, path, err) } + c.log.Debugf("Creating new request to request URL %s", req.URL.String()) // Add generals headers to the request, we are dealing exclusively with JSON. 
// Content-Type / Accepted type can be overridden by the caller. diff --git a/magefile.go b/magefile.go index 5a5e9af64ee..91b46f7566c 100644 --- a/magefile.go +++ b/magefile.go @@ -38,6 +38,7 @@ import ( // mage:import "github.com/elastic/elastic-agent/dev-tools/mage/target/test" + "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" ) @@ -365,29 +366,12 @@ func Package() { start := time.Now() defer func() { fmt.Println("package ran for", time.Since(start)) }() - platformPackages := []struct { - platform string - packages string - }{ - {"darwin/amd64", "darwin-x86_64.tar.gz"}, - {"darwin/arm64", "darwin-aarch64.tar.gz"}, - {"linux/amd64", "linux-x86_64.tar.gz"}, - {"linux/arm64", "linux-arm64.tar.gz"}, - {"windows/amd64", "windows-x86_64.zip"}, + platforms := devtools.Platforms.Names() + if len(platforms) == 0 { + panic("elastic-agent package is expected to build at least one platform package") } - var requiredPackages []string - for _, p := range platformPackages { - if _, enabled := devtools.Platforms.Get(p.platform); enabled { - requiredPackages = append(requiredPackages, p.packages) - } - } - - if len(requiredPackages) == 0 { - panic("elastic-agent package is expected to include other packages") - } - - packageAgent(requiredPackages, devtools.UseElasticAgentPackaging) + packageAgent(platforms, devtools.UseElasticAgentPackaging) } func getPackageName(beat, version, pkg string) (string, string) { @@ -466,11 +450,19 @@ func Config() { // ControlProto generates pkg/agent/control/proto module. func ControlProto() error { + if err := sh.RunV( + "protoc", + "--go_out=internal/pkg/agent/control/v2/cproto", "--go_opt=paths=source_relative", + "--go-grpc_out=internal/pkg/agent/control/v2/cproto", "--go-grpc_opt=paths=source_relative", + "control_v2.proto"); err != nil { + return err + } + return sh.RunV( "protoc", - "--go_out=internal/pkg/agent/control/cproto", "--go_opt=paths=source_relative", - "--go-grpc_out=internal/pkg/agent/control/cproto", "--go-grpc_opt=paths=source_relative", - "control.proto") + "--go_out=internal/pkg/agent/control/v1/proto", "--go_opt=paths=source_relative", + "--go-grpc_out=internal/pkg/agent/control/v1/proto", "--go-grpc_opt=paths=source_relative", + "control_v1.proto") } // FakeShipperProto generates pkg/component/fake/common event protocol. @@ -575,7 +567,7 @@ func runAgent(env map[string]string) error { if !strings.Contains(dockerImageOut, tag) { // produce docker package packageAgent([]string{ - "linux-x86_64.tar.gz", + "linux/amd64", }, devtools.UseElasticAgentDemoPackaging) dockerPackagePath := filepath.Join("build", "package", "elastic-agent", "elastic-agent-linux-amd64.docker", "docker-build") @@ -623,7 +615,7 @@ func runAgent(env map[string]string) error { return sh.Run("docker", dockerCmdArgs...) 
} -func packageAgent(requiredPackages []string, packagingFn func()) { +func packageAgent(platforms []string, packagingFn func()) { version, found := os.LookupEnv("BEAT_VERSION") if !found { version = release.Version() @@ -632,6 +624,19 @@ func packageAgent(requiredPackages []string, packagingFn func()) { dropPath, found := os.LookupEnv(agentDropPath) var archivePath string + platformPackages := map[string]string{ + "darwin/amd64": "darwin-x86_64.tar.gz", + "darwin/arm64": "darwin-aarch64.tar.gz", + "linux/amd64": "linux-x86_64.tar.gz", + "linux/arm64": "linux-arm64.tar.gz", + "windows/amd64": "windows-x86_64.zip", + } + + requiredPackages := []string{} + for _, p := range platforms { + requiredPackages = append(requiredPackages, platformPackages[p]) + } + // build deps only when drop is not provided if !found || len(dropPath) == 0 { // prepare new drop @@ -650,24 +655,29 @@ func packageAgent(requiredPackages []string, packagingFn func()) { defer os.Unsetenv(agentDropPath) if devtools.ExternalBuild == true { - // for external go for all dependencies - dependencies := []string{ - "auditbeat", "filebeat", "heartbeat", "metricbeat", "osquerybeat", "packetbeat", // beat dependencies - "apm-server", + externalBinaries := []string{ + "auditbeat", "filebeat", "heartbeat", "metricbeat", "osquerybeat", "packetbeat", // "cloudbeat", // TODO: add once working "elastic-agent-shipper", + "apm-server", "endpoint-security", "fleet-server", } + ctx := context.Background() - for _, beat := range dependencies { - for _, reqPackage := range requiredPackages { + for _, binary := range externalBinaries { + for _, platform := range platforms { + reqPackage := platformPackages[platform] targetPath := filepath.Join(archivePath, reqPackage) os.MkdirAll(targetPath, 0755) - newVersion, packageName := getPackageName(beat, version, reqPackage) - err := fetchBinaryFromArtifactsApi(ctx, packageName, beat, newVersion, targetPath) + newVersion, packageName := getPackageName(binary, version, reqPackage) + err := fetchBinaryFromArtifactsApi(ctx, packageName, binary, newVersion, targetPath) if err != nil { - panic(fmt.Sprintf("fetchBinaryFromArtifactsApi failed: %v", err)) + if strings.Contains(err.Error(), "object not found") { + fmt.Printf("Downloading %s: unsupported on %s, skipping\n", binary, platform) + } else { + panic(fmt.Sprintf("fetchBinaryFromArtifactsApi failed: %v", err)) + } } } } @@ -893,6 +903,14 @@ func movePackagesToArchive(dropPath string, requiredPackages []string) string { } func fetchBinaryFromArtifactsApi(ctx context.Context, packageName, artifact, version, downloadPath string) error { + // Only log fatal logs for logs produced using logrus. This is the global logger + // used by github.com/elastic/e2e-testing/pkg/downloads which can only be configured globally like this or via + // environment variables. + // + // Using FatalLevel avoids filling the build log with scary looking errors when we attempt to + // download artifacts on unsupported platforms and choose to ignore the errors. 
+ logrus.SetLevel(logrus.FatalLevel) + location, err := downloads.FetchBeatsBinary( ctx, packageName, @@ -902,8 +920,11 @@ func fetchBinaryFromArtifactsApi(ctx context.Context, packageName, artifact, ver false, downloadPath, true) - fmt.Println("downloaded binaries on location:", location) + if err != nil { + return err + } + fmt.Println("downloaded binaries on", location) return err } diff --git a/pkg/component/component.go b/pkg/component/component.go index fa0a5d6e9f5..8afd94b0df5 100644 --- a/pkg/component/component.go +++ b/pkg/component/component.go @@ -10,6 +10,8 @@ import ( "sort" "strings" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent-client/v7/pkg/proto" @@ -19,7 +21,7 @@ import ( ) // GenerateMonitoringCfgFn is a function that can inject information into the model generation process. -type GenerateMonitoringCfgFn func(map[string]interface{}, map[string]string) (map[string]interface{}, error) +type GenerateMonitoringCfgFn func(map[string]interface{}, []Component, map[string]string) (map[string]interface{}, error) const ( // defaultUnitLogLevel is the default log level that a unit will get if one is not defined. @@ -103,21 +105,21 @@ func (c *Component) Type() string { } // ToComponents returns the components that should be running based on the policy and the current runtime specification. -func (r *RuntimeSpecs) ToComponents(policy map[string]interface{}, monitoringInjector GenerateMonitoringCfgFn) ([]Component, error) { - components, binaryMapping, err := r.PolicyToComponents(policy) +func (r *RuntimeSpecs) ToComponents(policy map[string]interface{}, monitoringInjector GenerateMonitoringCfgFn, ll logp.Level) ([]Component, error) { + components, binaryMapping, err := r.PolicyToComponents(policy, ll) if err != nil { return nil, err } if monitoringInjector != nil { - monitoringCfg, err := monitoringInjector(policy, binaryMapping) + monitoringCfg, err := monitoringInjector(policy, components, binaryMapping) if err != nil { return nil, fmt.Errorf("failed to inject monitoring: %w", err) } if monitoringCfg != nil { // monitoring is enabled - monitoringComps, _, err := r.PolicyToComponents(monitoringCfg) + monitoringComps, _, err := r.PolicyToComponents(monitoringCfg, ll) if err != nil { return nil, fmt.Errorf("failed to generate monitoring components: %w", err) } @@ -131,9 +133,8 @@ func (r *RuntimeSpecs) ToComponents(policy map[string]interface{}, monitoringInj // PolicyToComponents takes the policy and generated a component model along with providing a mapping between component // and the running binary. 
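// Editor's sketch (not part of the PR diff): ToComponents and PolicyToComponents now take the
// agent's logp.Level, which becomes each unit's log level unless the input or output sets its
// own "log_level". The specs path and policy are placeholders; LoadRuntimeSpecs and
// SkipBinaryCheck are used the same way as in the updated tests further down in this diff.
func renderComponents(policy map[string]interface{}, platform component.PlatformDetail) ([]component.Component, error) {
	specs, err := component.LoadRuntimeSpecs("/path/to/specs", platform, component.SkipBinaryCheck())
	if err != nil {
		return nil, err
	}
	// nil monitoring injector; logp.DebugLevel flows down to every unit without an explicit log_level
	return specs.ToComponents(policy, nil, logp.DebugLevel)
}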
-func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Component, map[string]string, error) { - const revision = "revision" - outputsMap, err := toIntermediate(policy) +func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}, ll logp.Level) ([]Component, map[string]string, error) { + outputsMap, err := toIntermediate(policy, r.aliasMapping, ll) if err != nil { return nil, nil, err } @@ -146,7 +147,7 @@ func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Comp if err != nil { return nil, nil, err } - vars, err := transpiler.NewVars(map[string]interface{}{ + vars, err := transpiler.NewVars("", map[string]interface{}{ "runtime": map[string]interface{}{ "platform": r.platform.String(), "os": r.platform.OS, @@ -222,11 +223,11 @@ func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Comp // skip; not enabled continue } - if v, ok := policy[revision]; ok { - input.input["policy"] = map[string]interface{}{ - revision: v, - } - } + + // Inject the top level fleet policy revision into each into configuration. This + // allows individual inputs (like endpoint) to detect policy changes more easily. + injectInputPolicyID(policy, input.input) + cfg, cfgErr := ExpectedConfig(input.input) if cfg != nil { cfg.Type = inputType // ensure alias is replaced in the ExpectedConfig to be non-alias type @@ -243,7 +244,7 @@ func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Comp componentID := fmt.Sprintf("%s-%s", inputType, outputName) if usingShipper { // using shipper for this component - connected, _ := shipperMap[supportedShipper.ShipperType] + connected := shipperMap[supportedShipper.ShipperType] connected = append(connected, componentID) shipperMap[supportedShipper.ShipperType] = connected } else { @@ -326,6 +327,35 @@ func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Comp return components, componentIdsInputMap, nil } +// Injects or creates a policy.revision sub-object in the input map. +func injectInputPolicyID(fleetPolicy map[string]interface{}, input map[string]interface{}) { + if input == nil { + return + } + + // If there is no top level fleet policy revision, there's nothing to inject. + revision, exists := fleetPolicy["revision"] + if !exists { + return + } + + // Check if a policy key exists with a non-nil policy object. + policyObj, exists := input["policy"] + if exists && policyObj != nil { + // If the policy object converts to map[string]interface{}, inject the revision key. + // Note that if the interface conversion here fails, we do nothing because we don't + // know what type of object exists with the policy key. + if policyMap, ok := policyObj.(map[string]interface{}); ok { + policyMap["revision"] = revision + } + } else { + // If there was no policy key or the value was nil, then inject a policy object with a revision key. + input["policy"] = map[string]interface{}{ + "revision": revision, + } + } +} + func componentToShipperConfig(comp Component) (*proto.UnitExpectedConfig, error) { cfgUnits := make([]interface{}, 0, len(comp.Units)) for _, unit := range comp.Units { @@ -391,7 +421,7 @@ func getSupportedShipper(r *RuntimeSpecs, output outputI, inputSpec InputRuntime // toIntermediate takes the policy and returns it into an intermediate representation that is easier to map into a set // of components. 
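// Editor's sketch (not part of the PR diff): injectInputPolicyID copies the fleet policy's
// top-level "revision" into a "policy" object on each input configuration so inputs such as
// endpoint can detect policy changes. The helper is unexported, so this only illustrates the
// in-package behavior exercised by the new TestInjectingInputPolicyID cases; values are placeholders.
func exampleInjectPolicyRevision() {
	input := map[string]interface{}{"type": "endpoint", "id": "endpoint-0"}
	injectInputPolicyID(map[string]interface{}{"revision": 10}, input)
	// input now also carries: "policy": map[string]interface{}{"revision": 10}
}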
-func toIntermediate(policy map[string]interface{}) (map[string]outputI, error) { +func toIntermediate(policy map[string]interface{}, aliasMapping map[string]string, ll logp.Level) (map[string]outputI, error) { const ( outputsKey = "outputs" enabledKey = "enabled" @@ -436,7 +466,7 @@ func toIntermediate(policy map[string]interface{}) (map[string]outputI, error) { enabled = enabledVal delete(output, enabledKey) } - logLevel, err := getLogLevel(output) + logLevel, err := getLogLevel(output, ll) if err != nil { return nil, fmt.Errorf("invalid 'outputs.%s.log_level', %w", name, err) } @@ -473,6 +503,11 @@ func toIntermediate(policy map[string]interface{}) (map[string]outputI, error) { if !ok { return nil, fmt.Errorf("invalid 'inputs.%d.type', expected a string not a %T", idx, typeRaw) } + if realInputType, found := aliasMapping[t]; found { + t = realInputType + // by replacing type we make sure component understands aliasing + input[typeKey] = t + } idRaw, ok := input[idKey] if !ok { // no ID; fallback to type @@ -483,7 +518,7 @@ func toIntermediate(policy map[string]interface{}) (map[string]outputI, error) { return nil, fmt.Errorf("invalid 'inputs.%d.id', expected a string not a %T", idx, idRaw) } if hasDuplicate(outputsMap, id) { - return nil, fmt.Errorf("invalid 'inputs.%d.id', has a duplicate id %q (id is required to be unique)", idx, id) + return nil, fmt.Errorf("invalid 'inputs.%d.id', has a duplicate id %q. Please add a unique value for the 'id' key to each input in the agent policy", idx, id) } outputName := "default" if outputRaw, ok := input[useKey]; ok { @@ -507,7 +542,7 @@ func toIntermediate(policy map[string]interface{}) (map[string]outputI, error) { enabled = enabledVal delete(input, enabledKey) } - logLevel, err := getLogLevel(input) + logLevel, err := getLogLevel(input, ll) if err != nil { return nil, fmt.Errorf("invalid 'inputs.%d.log_level', %w", idx, err) } @@ -578,10 +613,13 @@ func hasDuplicate(outputsMap map[string]outputI, id string) bool { return false } -func getLogLevel(val map[string]interface{}) (client.UnitLogLevel, error) { +func getLogLevel(val map[string]interface{}, ll logp.Level) (client.UnitLogLevel, error) { const logLevelKey = "log_level" - logLevel := defaultUnitLogLevel + logLevel, err := stringToLogLevel(ll.String()) + if err != nil { + return defaultUnitLogLevel, err + } if logLevelRaw, ok := val[logLevelKey]; ok { logLevelStr, ok := logLevelRaw.(string) if !ok { diff --git a/pkg/component/component_test.go b/pkg/component/component_test.go index c9340e9c698..5a72eb84037 100644 --- a/pkg/component/component_test.go +++ b/pkg/component/component_test.go @@ -12,6 +12,8 @@ import ( "sort" "testing" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/google/go-cmp/cmp" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/structpb" @@ -36,6 +38,7 @@ func TestToComponents(t *testing.T) { Name string Platform PlatformDetail Policy map[string]interface{} + LogLevel logp.Level Err string Result []Component }{ @@ -192,7 +195,7 @@ func TestToComponents(t *testing.T) { }, }, }, - Err: `invalid 'inputs.1.id', has a duplicate id "filestream" (id is required to be unique)`, + Err: `invalid 'inputs.1.id', has a duplicate id "filestream". 
Please add a unique value for the 'id' key to each input in the agent policy`, }, { Name: "Invalid: inputs entry id not a string", @@ -320,58 +323,6 @@ func TestToComponents(t *testing.T) { }, }, }, - { - Name: "Invalid: inputs endpoint not support on container platform", - Platform: PlatformDetail{ - Platform: Platform{ - OS: Container, - Arch: AMD64, - GOOS: Linux, - }, - }, - Policy: map[string]interface{}{ - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "type": "elasticsearch", - "enabled": true, - }, - }, - "inputs": []interface{}{ - map[string]interface{}{ - "type": "endpoint", - "id": "endpoint-0", - "use_output": "default", - "enabled": true, - }, - }, - }, - Result: []Component{ - { - ID: "endpoint-default", - InputSpec: &InputRuntimeSpec{}, - Err: ErrInputNotSupportedOnPlatform, - Units: []Unit{ - { - ID: "endpoint-default", - Type: client.UnitTypeOutput, - LogLevel: defaultUnitLogLevel, - Config: MustExpectedConfig(map[string]interface{}{ - "type": "elasticsearch", - }), - }, - { - ID: "endpoint-default-endpoint-0", - Type: client.UnitTypeInput, - LogLevel: defaultUnitLogLevel, - Config: MustExpectedConfig(map[string]interface{}{ - "type": "endpoint", - "id": "endpoint-0", - }), - }, - }, - }, - }, - }, { Name: "Invalid: inputs endpoint doesn't support logstash", Platform: linuxAMD64Platform, @@ -383,19 +334,19 @@ func TestToComponents(t *testing.T) { }, "inputs": []interface{}{ map[string]interface{}{ - "type": "endpoint", - "id": "endpoint-0", + "type": "fleet-server", + "id": "fleet-server-0", }, }, }, Result: []Component{ { - ID: "endpoint-default", + ID: "fleet-server-default", InputSpec: &InputRuntimeSpec{}, Err: ErrOutputNotSupported, Units: []Unit{ { - ID: "endpoint-default", + ID: "fleet-server-default", Type: client.UnitTypeOutput, LogLevel: defaultUnitLogLevel, Config: MustExpectedConfig(map[string]interface{}{ @@ -403,12 +354,12 @@ func TestToComponents(t *testing.T) { }), }, { - ID: "endpoint-default-endpoint-0", + ID: "fleet-server-default-fleet-server-0", Type: client.UnitTypeInput, LogLevel: defaultUnitLogLevel, Config: MustExpectedConfig(map[string]interface{}{ - "type": "endpoint", - "id": "endpoint-0", + "type": "fleet-server", + "id": "fleet-server-0", }), }, }, @@ -626,6 +577,113 @@ func TestToComponents(t *testing.T) { }, }, }, + { + Name: "Debug log level", + Platform: linuxAMD64Platform, + LogLevel: logp.DebugLevel, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + "enabled": true, + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-1", + "enabled": false, + }, + }, + }, + Result: []Component{ + { + InputSpec: &InputRuntimeSpec{ + InputType: "filestream", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "filestream-default", + Type: client.UnitTypeOutput, + LogLevel: client.UnitLogLevelDebug, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "elasticsearch", + }), + }, + { + ID: "filestream-default-filestream-0", + Type: client.UnitTypeInput, + LogLevel: client.UnitLogLevelDebug, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }), + }, + }, + }, + }, + }, + { + Name: "Unique log level", + Platform: linuxAMD64Platform, + LogLevel: logp.ErrorLevel, + Policy: 
map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + "enabled": true, + "log_level": "debug", + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-1", + "enabled": false, + }, + }, + }, + Result: []Component{ + { + InputSpec: &InputRuntimeSpec{ + InputType: "filestream", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "filestream-default", + Type: client.UnitTypeOutput, + LogLevel: client.UnitLogLevelError, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "elasticsearch", + }), + }, + { + ID: "filestream-default-filestream-0", + Type: client.UnitTypeInput, + LogLevel: client.UnitLogLevelDebug, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }), + }, + }, + }, + }, + }, { Name: "Complex representation", Platform: linuxAMD64Platform, @@ -790,7 +848,7 @@ func TestToComponents(t *testing.T) { Type: client.UnitTypeInput, LogLevel: defaultUnitLogLevel, Config: mustExpectedConfigForceType(map[string]interface{}{ - "type": "logfile", + "type": "log", "id": "logfile-0", }, "log"), }, @@ -825,7 +883,7 @@ func TestToComponents(t *testing.T) { Type: client.UnitTypeInput, LogLevel: defaultUnitLogLevel, Config: mustExpectedConfigForceType(map[string]interface{}{ - "type": "logfile", + "type": "log", "id": "logfile-2", }, "log"), }, @@ -851,7 +909,7 @@ func TestToComponents(t *testing.T) { Type: client.UnitTypeInput, LogLevel: defaultUnitLogLevel, Config: mustExpectedConfigForceType(map[string]interface{}{ - "type": "logfile", + "type": "log", "id": "logfile-3", }, "log"), }, @@ -877,7 +935,7 @@ func TestToComponents(t *testing.T) { Type: client.UnitTypeInput, LogLevel: defaultUnitLogLevel, Config: mustExpectedConfigForceType(map[string]interface{}{ - "type": "logfile", + "type": "log", "id": "logfile-4", }, "log"), }, @@ -1189,7 +1247,7 @@ func TestToComponents(t *testing.T) { Type: client.UnitTypeInput, LogLevel: defaultUnitLogLevel, Config: mustExpectedConfigForceType(map[string]interface{}{ - "type": "logfile", + "type": "log", "id": "logfile-0", }, "log"), }, @@ -1250,7 +1308,7 @@ func TestToComponents(t *testing.T) { map[string]interface{}{ "id": "log-default-logfile-0", "config": map[string]interface{}{ - "type": "logfile", + "type": "log", "id": "logfile-0", }, }, @@ -1299,7 +1357,7 @@ func TestToComponents(t *testing.T) { Type: client.UnitTypeInput, LogLevel: defaultUnitLogLevel, Config: mustExpectedConfigForceType(map[string]interface{}{ - "type": "logfile", + "type": "log", "id": "logfile-2", }, "log"), }, @@ -1326,7 +1384,7 @@ func TestToComponents(t *testing.T) { Type: client.UnitTypeInput, LogLevel: defaultUnitLogLevel, Config: mustExpectedConfigForceType(map[string]interface{}{ - "type": "logfile", + "type": "log", "id": "logfile-3", }, "log"), }, @@ -1354,7 +1412,7 @@ func TestToComponents(t *testing.T) { map[string]interface{}{ "id": "log-stashit-logfile-3", "config": map[string]interface{}{ - "type": "logfile", + "type": "log", "id": "logfile-3", }, }, @@ -1393,7 +1451,7 @@ func TestToComponents(t *testing.T) { Type: client.UnitTypeInput, LogLevel: defaultUnitLogLevel, Config: mustExpectedConfigForceType(map[string]interface{}{ - "type": "logfile", + "type": "log", "id": "logfile-4", }, "log"), }, @@ -1421,7 +1479,7 @@ func 
TestToComponents(t *testing.T) { map[string]interface{}{ "id": "log-redis-logfile-4", "config": map[string]interface{}{ - "type": "logfile", + "type": "log", "id": "logfile-4", }, }, @@ -1469,6 +1527,67 @@ func TestToComponents(t *testing.T) { }, }, }, + { + Name: "Alias representation", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "logfile", + "id": "some-id", + "enabled": true, + }, + map[string]interface{}{ + "type": "log", + "id": "log-1", + "enabled": true, + }, + }, + }, + Result: []Component{ + { + InputSpec: &InputRuntimeSpec{ + InputType: "log", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "log-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "elasticsearch", + }), + }, + { + ID: "log-default-some-id", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "log", + "id": "some-id", + }), + }, + { + ID: "log-default-log-1", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "log", + "id": "log-1", + }), + }, + }, + }, + }, + }, } for _, scenario := range scenarios { @@ -1476,7 +1595,7 @@ func TestToComponents(t *testing.T) { runtime, err := LoadRuntimeSpecs(filepath.Join("..", "..", "specs"), scenario.Platform, SkipBinaryCheck()) require.NoError(t, err) - result, err := runtime.ToComponents(scenario.Policy, nil) + result, err := runtime.ToComponents(scenario.Policy, nil, scenario.LogLevel) if scenario.Err != "" { assert.Equal(t, scenario.Err, err.Error()) } else { @@ -1494,6 +1613,9 @@ func TestToComponents(t *testing.T) { assert.Equal(t, expected.InputSpec.InputType, actual.InputSpec.InputType) assert.Equal(t, expected.InputSpec.BinaryName, actual.InputSpec.BinaryName) assert.Equal(t, expected.InputSpec.BinaryPath, actual.InputSpec.BinaryPath) + for i, eu := range expected.Units { + assert.EqualValues(t, eu.Config, actual.Units[i].Config) + } assert.EqualValues(t, expected.Units, actual.Units) if expected.Shipper != nil { assert.Equal(t, *expected.Shipper, *actual.Shipper) @@ -1518,6 +1640,68 @@ func TestToComponents(t *testing.T) { } } +func TestInjectingInputPolicyID(t *testing.T) { + const testRevision = 10 + fleetPolicy := map[string]interface{}{ + "revision": testRevision, + } + + tests := []struct { + name string + policy map[string]interface{} + in map[string]interface{} + out map[string]interface{} + }{ + {"NilEverything", nil, nil, nil}, + {"NilInput", fleetPolicy, nil, nil}, + {"NilPolicy", nil, + map[string]interface{}{}, + map[string]interface{}{}, + }, + {"EmptyPolicy", map[string]interface{}{}, + map[string]interface{}{}, + map[string]interface{}{}, + }, + {"CreatePolicyRevision", fleetPolicy, + map[string]interface{}{}, + map[string]interface{}{ + "policy": map[string]interface{}{"revision": testRevision}, + }, + }, + {"NilPolicyObjectType", fleetPolicy, + map[string]interface{}{ + "policy": nil, + }, + map[string]interface{}{ + "policy": map[string]interface{}{"revision": testRevision}, + }, + }, + {"InjectPolicyRevision", fleetPolicy, + map[string]interface{}{ + "policy": map[string]interface{}{"key": "value"}, + }, + map[string]interface{}{ + 
"policy": map[string]interface{}{"key": "value", "revision": testRevision}, + }, + }, + {"UnknownPolicyObjectType", fleetPolicy, + map[string]interface{}{ + "policy": map[string]int{"key": 10}, + }, + map[string]interface{}{ + "policy": map[string]int{"key": 10}, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + injectInputPolicyID(tc.policy, tc.in) + assert.Equal(t, tc.out, tc.in) + }) + } +} + func assertEqualUnitExpectedConfigs(t *testing.T, expected *Unit, actual *Unit) { t.Helper() assert.Equal(t, expected.ID, actual.ID) diff --git a/pkg/component/load_test.go b/pkg/component/load_test.go index f9d88ebba26..53d8c0c768a 100644 --- a/pkg/component/load_test.go +++ b/pkg/component/load_test.go @@ -32,12 +32,6 @@ func TestLoadRuntimeSpecs(t *testing.T) { // unknown input _, err = runtime.GetInput("unknown") require.ErrorIs(t, err, ErrInputNotSupported) - - // endpoint not support on container platforms - if platform.OS == "container" { - _, err = runtime.GetInput("endpoint") - assert.ErrorIs(t, err, ErrInputNotSupportedOnPlatform) - } }) } } @@ -59,6 +53,10 @@ func TestLoadSpec_Components(t *testing.T) { Name: "Cloudbeat", Path: "cloudbeat.spec.yml", }, + { + Name: "Cloud Defend", + Path: "cloud-defend.spec.yml", + }, { Name: "Endpoint Security", Path: "endpoint-security.spec.yml", diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go index 2575a35d5f1..94fc4428b74 100644 --- a/pkg/component/runtime/command.go +++ b/pkg/component/runtime/command.go @@ -12,14 +12,19 @@ import ( "os/exec" "path/filepath" "runtime" + "strings" "time" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/pkg/utils" + "go.uber.org/zap/zapcore" + "golang.org/x/time/rate" "github.com/elastic/elastic-agent-client/v7/pkg/client" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/elastic/elastic-agent/pkg/core/process" + "github.com/elastic/elastic-agent/pkg/utils" ) type actionMode int @@ -39,7 +44,7 @@ const ( type MonitoringManager interface { EnrichArgs(string, string, []string) []string - Prepare() error + Prepare(string) error Cleanup(string) error } @@ -50,6 +55,10 @@ type procState struct { // CommandRuntime provides the command runtime for running a component as a subprocess. type CommandRuntime struct { + logger *logger.Logger + logStd *logWriter + logErr *logWriter + current component.Component monitor MonitoringManager @@ -64,10 +73,11 @@ type CommandRuntime struct { state ComponentState lastCheckin time.Time missedCheckins int + restartBucket *rate.Limiter } // NewCommandRuntime creates a new command runtime for the provided component. 
-func NewCommandRuntime(comp component.Component, monitor MonitoringManager) (ComponentRuntime, error) { +func NewCommandRuntime(comp component.Component, logger *logger.Logger, monitor MonitoringManager) (ComponentRuntime, error) { c := &CommandRuntime{ current: comp, monitor: monitor, @@ -82,6 +92,18 @@ func NewCommandRuntime(comp component.Component, monitor MonitoringManager) (Com if cmdSpec == nil { return nil, errors.New("must have command defined in specification") } + c.logger = logger.With("component", map[string]interface{}{ + "id": comp.ID, + "type": c.getSpecType(), + "binary": c.getSpecBinaryName(), + }) + ll, unitLevels := getLogLevels(comp) + c.logStd = createLogWriter(c.current, c.getCommandSpec(), c.getSpecType(), c.getSpecBinaryName(), ll, unitLevels, logSourceStdout) + ll, unitLevels = getLogLevels(comp) // don't want to share mapping of units (so new map is generated) + c.logErr = createLogWriter(c.current, c.getCommandSpec(), c.getSpecType(), c.getSpecBinaryName(), ll, unitLevels, logSourceStderr) + + c.restartBucket = newRateLimiter(cmdSpec.RestartMonitoringPeriod, cmdSpec.MaxRestartsPerPeriod) + return c, nil } @@ -124,6 +146,8 @@ func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) error { } } case newComp := <-c.compCh: + c.current = newComp + c.syncLogLevels() sendExpected := c.state.syncExpected(&newComp) changed := c.state.syncUnits(&newComp) if sendExpected || c.state.unsettled() { @@ -293,7 +317,7 @@ func (c *CommandRuntime) start(comm Communicator) error { return fmt.Errorf("execution of component prevented: %w", err) } - if err := c.monitor.Prepare(); err != nil { + if err := c.monitor.Prepare(c.current.ID); err != nil { return err } args := c.monitor.EnrichArgs(c.current.ID, c.getSpecBinaryName(), cmdSpec.Args) @@ -303,15 +327,22 @@ func (c *CommandRuntime) start(comm Communicator) error { _ = os.MkdirAll(dataPath, 0755) args = append(args, "-E", "path.data="+dataPath) + // reset checkin state before starting the process. 
+ c.lastCheckin = time.Time{} + c.missedCheckins = 0 + + // Ensure there is no pending checkin expected message buffered to avoid sending the new process + // the expected state of the previous process: https://github.com/elastic/beats/issues/34137 + comm.ClearPendingCheckinExpected() + proc, err := process.Start(path, process.WithArgs(args), process.WithEnv(env), - process.WithCmdOptions(attachOutErr, dirPath(workDir))) + process.WithCmdOptions(attachOutErr(c.logStd, c.logErr), dirPath(workDir))) if err != nil { return err } - c.lastCheckin = time.Time{} - c.missedCheckins = 0 + c.proc = proc c.forceCompState(client.UnitStateStarting, fmt.Sprintf("Starting: spawned pid '%d'", c.proc.PID)) c.startWatcher(proc, comm) @@ -333,7 +364,6 @@ func (c *CommandRuntime) stop(ctx context.Context) error { // cleanup reserved resources related to monitoring defer c.monitor.Cleanup(c.current.ID) //nolint:errcheck // this is ok - cmdSpec := c.getCommandSpec() go func(info *process.Info, timeout time.Duration) { t := time.NewTimer(timeout) @@ -372,9 +402,14 @@ func (c *CommandRuntime) startWatcher(info *process.Info, comm Communicator) { func (c *CommandRuntime) handleProc(state *os.ProcessState) bool { switch c.actionState { case actionStart: - // should still be running - stopMsg := fmt.Sprintf("Failed: pid '%d' exited with code '%d'", state.Pid(), state.ExitCode()) - c.forceCompState(client.UnitStateFailed, stopMsg) + if c.restartBucket != nil && c.restartBucket.Allow() { + stopMsg := fmt.Sprintf("Suppressing FAILED state due to restart for '%d' exited with code '%d'", state.Pid(), state.ExitCode()) + c.forceCompState(client.UnitStateStopped, stopMsg) + } else { + // report failure only if bucket is full of restart events + stopMsg := fmt.Sprintf("Failed: pid '%d' exited with code '%d'", state.Pid(), state.ExitCode()) + c.forceCompState(client.UnitStateFailed, stopMsg) + } return true case actionStop, actionTeardown: // stopping (should have exited) @@ -452,10 +487,62 @@ func (c *CommandRuntime) getCommandSpec() *component.CommandSpec { return nil } -func attachOutErr(cmd *exec.Cmd) error { - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - return nil +func (c *CommandRuntime) syncLogLevels() { + ll, unitLevels := getLogLevels(c.current) + c.logStd.SetLevels(ll, unitLevels) + ll, unitLevels = getLogLevels(c.current) // don't want to share mapping of units (so new map is generated) + c.logErr.SetLevels(ll, unitLevels) +} + +func attachOutErr(stdOut *logWriter, stdErr *logWriter) process.CmdOption { + return func(cmd *exec.Cmd) error { + cmd.Stdout = stdOut + cmd.Stderr = stdErr + return nil + } +} + +func createLogWriter(comp component.Component, cmdSpec *component.CommandSpec, typeStr string, binaryName string, ll zapcore.Level, unitLevels map[string]zapcore.Level, src logSource) *logWriter { + dataset := fmt.Sprintf("elastic_agent.%s", strings.ReplaceAll(strings.ReplaceAll(binaryName, "-", "_"), "/", "_")) + logger := logger.NewWithoutConfig("").With("component", map[string]interface{}{ + "id": comp.ID, + "type": typeStr, + "binary": binaryName, + "dataset": dataset, + }) + return newLogWriter(logger.Core(), cmdSpec.Log, ll, unitLevels, src) +} + +// getLogLevels returns the lowest log level and a mapping between each unit and its defined log level. 
+func getLogLevels(comp component.Component) (zapcore.Level, map[string]zapcore.Level) { + baseLevel := zapcore.ErrorLevel + unitLevels := make(map[string]zapcore.Level) + for _, unit := range comp.Units { + ll := toZapcoreLevel(unit.LogLevel) + unitLevels[unit.ID] = ll + if ll < baseLevel { + baseLevel = ll + } + } + return baseLevel, unitLevels +} + +func toZapcoreLevel(unitLevel client.UnitLogLevel) zapcore.Level { + switch unitLevel { + case client.UnitLogLevelError: + return zapcore.ErrorLevel + case client.UnitLogLevelWarn: + return zapcore.WarnLevel + case client.UnitLogLevelInfo: + return zapcore.InfoLevel + case client.UnitLogLevelDebug: + return zapcore.DebugLevel + case client.UnitLogLevelTrace: + // zap doesn't support trace + return zapcore.DebugLevel + } + // unknown level (default to info) + return zapcore.InfoLevel } func dirPath(path string) process.CmdOption { @@ -464,3 +551,20 @@ return nil } } + +// newRateLimiter returns a limiter that allows at most maxEventsPerPeriod restarts per restartMonitoringPeriod, or nil when restart monitoring is disabled. +func newRateLimiter(restartMonitoringPeriod time.Duration, maxEventsPerPeriod int) *rate.Limiter { + if restartMonitoringPeriod <= 0 || maxEventsPerPeriod <= 0 { + return nil + } + + freq := restartMonitoringPeriod.Seconds() + events := float64(maxEventsPerPeriod) + perSecond := events / freq + if perSecond > 0 { + bucketSize := rate.Limit(perSecond) + return rate.NewLimiter(bucketSize, maxEventsPerPeriod) + } + + return nil +} diff --git a/pkg/component/runtime/command_test.go b/pkg/component/runtime/command_test.go new file mode 100644 index 00000000000..ca4ee184c06 --- /dev/null +++ b/pkg/component/runtime/command_test.go @@ -0,0 +1,46 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License.
+ +package runtime + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestAddToBucket(t *testing.T) { + testCases := map[string]struct { + bucketSize int + add int + addSleep time.Duration + shouldBlock bool + }{ + "no error": {1, 0, 1 * time.Millisecond, false}, + "error within limit": {1, 1, 1 * time.Millisecond, false}, + "errors > than limit but across timespans": {1, 2, 80 * time.Millisecond, false}, + "errors > than limit within timespans, exact bucket size": {2, 2, 2 * time.Millisecond, false}, + "errors > than limit within timespans, off by one": {2, 3, 2 * time.Millisecond, true}, + "errors > than limit within timespans": {2, 4, 2 * time.Millisecond, true}, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + dropRate := 50 * time.Millisecond + b := newRateLimiter(dropRate, tc.bucketSize) + + blocked := false + b.Allow() + <-time.After(dropRate + 20*time.Millisecond) // init ticker + + for i := 0; i < tc.add; i++ { + wasBlocked := !b.Allow() + blocked = blocked || wasBlocked + <-time.After(tc.addSleep) + } + require.Equal(t, tc.shouldBlock, blocked) + }) + } +} diff --git a/pkg/component/runtime/conn_info_server_test.go b/pkg/component/runtime/conn_info_server_test.go index 4b221a64930..43b7937eb99 100644 --- a/pkg/component/runtime/conn_info_server_test.go +++ b/pkg/component/runtime/conn_info_server_test.go @@ -60,6 +60,9 @@ func (c *mockCommunicator) WriteConnInfo(w io.Writer, services ...client.Service func (c *mockCommunicator) CheckinExpected(expected *proto.CheckinExpected) { } +func (c *mockCommunicator) ClearPendingCheckinExpected() { +} + func (c *mockCommunicator) CheckinObserved() <-chan *proto.CheckinObserved { return c.ch } diff --git a/pkg/component/runtime/log_writer.go b/pkg/component/runtime/log_writer.go new file mode 100644 index 00000000000..ee277c26fff --- /dev/null +++ b/pkg/component/runtime/log_writer.go @@ -0,0 +1,234 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package runtime + +import ( + "bytes" + "encoding/json" + "errors" + "strings" + "sync" + "time" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "github.com/elastic/elastic-agent/pkg/component" +) + +type zapcoreWriter interface { + Write(zapcore.Entry, []zapcore.Field) error +} + +type logSource uint8 + +const ( + logSourceStdout logSource = 0 + logSourceStderr logSource = 1 +) + +// logWriter is an `io.Writer` that takes lines and passes them through the logger. +// +// `Write` handles parsing lines as either ndjson or plain text. +type logWriter struct { + loggerCore zapcoreWriter + logCfg component.CommandLogSpec + logLevel zap.AtomicLevel + unitLevels map[string]zapcore.Level + levelMx sync.RWMutex + remainder []byte + + // inheritLevel is the level that will be used for a log message in the case it doesn't define a log level + // for stdout it is INFO and for stderr it is ERROR. 
+ inheritLevel zapcore.Level +} + +func newLogWriter(core zapcoreWriter, logCfg component.CommandLogSpec, ll zapcore.Level, unitLevels map[string]zapcore.Level, src logSource) *logWriter { + inheritLevel := zapcore.InfoLevel + if src == logSourceStderr { + inheritLevel = zapcore.ErrorLevel + } + return &logWriter{ + loggerCore: core, + logCfg: logCfg, + logLevel: zap.NewAtomicLevelAt(ll), + unitLevels: unitLevels, + inheritLevel: inheritLevel, + } +} + +func (r *logWriter) SetLevels(ll zapcore.Level, unitLevels map[string]zapcore.Level) { + r.logLevel.SetLevel(ll) + r.levelMx.Lock() + defer r.levelMx.Unlock() + r.unitLevels = unitLevels +} + +func (r *logWriter) Write(p []byte) (int, error) { + if len(p) == 0 { + // nothing to do + return 0, nil + } + offset := 0 + for { + idx := bytes.IndexByte(p[offset:], '\n') + if idx < 0 { + // not all used add to remainder to be used on next call + r.remainder = append(r.remainder, p[offset:]...) + return len(p), nil + } + + var line []byte + if r.remainder != nil { + line = r.remainder + r.remainder = nil + line = append(line, p[offset:offset+idx]...) + } else { + line = append(line, p[offset:offset+idx]...) + } + offset += idx + 1 + // drop '\r' from line (needed for Windows) + if len(line) > 0 && line[len(line)-1] == '\r' { + line = line[0 : len(line)-1] + } + if len(line) == 0 { + // empty line + continue + } + str := strings.TrimSpace(string(line)) + // try to parse line as JSON + if str[0] == '{' && r.handleJSON(str) { + // handled as JSON + continue + } + // considered standard text being it's not JSON, log at inherit level (if enabled) + if r.logLevel.Level().Enabled(r.inheritLevel) { + _ = r.loggerCore.Write(zapcore.Entry{ + Level: r.inheritLevel, + Time: time.Now(), + Message: str, + }, nil) + } + } +} + +func (r *logWriter) handleJSON(line string) bool { + var evt map[string]interface{} + if err := json.Unmarshal([]byte(line), &evt); err != nil { + return false + } + lvl := getLevel(evt, r.logCfg.LevelKey) + ts := getTimestamp(evt, r.logCfg.TimeKey, r.logCfg.TimeFormat) + msg := getMessage(evt, r.logCfg.MessageKey) + fields := getFields(evt, r.logCfg.IgnoreKeys) + + allowedLvl := r.logLevel.Level() + unitId := getUnitId(evt) + if unitId != "" { + r.levelMx.RLock() + if r.unitLevels != nil { + if unitLevel, ok := r.unitLevels[unitId]; ok { + allowedLvl = unitLevel + } + } + r.levelMx.RUnlock() + } + if allowedLvl.Enabled(lvl) { + _ = r.loggerCore.Write(zapcore.Entry{ + Level: lvl, + Time: ts, + Message: msg, + }, fields) + } + return true +} + +func getLevel(evt map[string]interface{}, key string) zapcore.Level { + lvl := zapcore.InfoLevel + err := unmarshalLevel(&lvl, getStrVal(evt, key)) + if err == nil { + delete(evt, key) + } + return lvl +} + +func unmarshalLevel(lvl *zapcore.Level, val string) error { + if val == "" { + return errors.New("empty val") + } else if val == "trace" { + // zap doesn't handle trace level we cast to debug + *lvl = zapcore.DebugLevel + return nil + } + return lvl.UnmarshalText([]byte(val)) +} + +func getMessage(evt map[string]interface{}, key string) string { + msg := getStrVal(evt, key) + if msg != "" { + delete(evt, key) + } + return msg +} + +func getTimestamp(evt map[string]interface{}, key string, format string) time.Time { + t, err := time.Parse(format, getStrVal(evt, key)) + if err == nil { + delete(evt, key) + return t + } + return time.Now() +} + +func getFields(evt map[string]interface{}, ignore []string) []zapcore.Field { + fields := make([]zapcore.Field, 0, len(evt)) + for k, v := range evt { + if 
len(ignore) > 0 && contains(ignore, k) { + // ignore field + continue + } + fields = append(fields, zap.Any(k, v)) + } + return fields +} + +func getStrVal(evt map[string]interface{}, key string) string { + raw, ok := evt[key] + if !ok { + return "" + } + str, ok := raw.(string) + if !ok { + return "" + } + return str +} + +func contains(s []string, val string) bool { + for _, v := range s { + if v == val { + return true + } + } + return false +} + +func getUnitId(evt map[string]interface{}) string { + if unitIdRaw, ok := evt["unit.id"]; ok { + if unitId, ok := unitIdRaw.(string); ok && unitId != "" { + return unitId + } + } + if unitMapRaw, ok := evt["unit"]; ok { + if unitMap, ok := unitMapRaw.(map[string]interface{}); ok { + if unitIdRaw, ok := unitMap["id"]; ok { + if unitId, ok := unitIdRaw.(string); ok && unitId != "" { + return unitId + } + } + } + } + return "" +} diff --git a/pkg/component/runtime/log_writer_test.go b/pkg/component/runtime/log_writer_test.go new file mode 100644 index 00000000000..f0b6b01caaa --- /dev/null +++ b/pkg/component/runtime/log_writer_test.go @@ -0,0 +1,327 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package runtime + +import ( + "sort" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "github.com/elastic/elastic-agent/pkg/component" +) + +type wrote struct { + entry zapcore.Entry + fields []zapcore.Field +} + +func TestLogWriter(t *testing.T) { + scenarios := []struct { + Name string + LogLevel zapcore.Level + UnitLevels map[string]zapcore.Level + LogSource logSource + Config component.CommandLogSpec + Lines []string + Wrote []wrote + }{ + { + Name: "multi plain text line - info/stdout", + LogLevel: zapcore.InfoLevel, + LogSource: logSourceStdout, + Lines: []string{ + "simple written line\r\n", + "another written line\n", + }, + Wrote: []wrote{ + { + entry: zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: time.Time{}, + Message: "simple written line", + }, + }, + { + entry: zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: time.Time{}, + Message: "another written line", + }, + }, + }, + }, + { + Name: "multi plain text line - info/stderr", + LogLevel: zapcore.InfoLevel, + LogSource: logSourceStderr, + Lines: []string{ + "simple written line\r\n", + "another written line\n", + }, + Wrote: []wrote{ + { + entry: zapcore.Entry{ + Level: zapcore.ErrorLevel, + Time: time.Time{}, + Message: "simple written line", + }, + }, + { + entry: zapcore.Entry{ + Level: zapcore.ErrorLevel, + Time: time.Time{}, + Message: "another written line", + }, + }, + }, + }, + { + Name: "multi plain text line - error/stdout", + LogLevel: zapcore.ErrorLevel, + LogSource: logSourceStdout, + Lines: []string{ + "simple written line\r\n", + "another written line\n", + }, + Wrote: []wrote{}, + }, + { + Name: "multi split text line", + LogLevel: zapcore.InfoLevel, + LogSource: logSourceStdout, + Lines: []string{ + "simple written line\r\n", + " another line sp", + "lit on ", + "", + "multi writes\n", + "\r\n", + "\n", + }, + Wrote: []wrote{ + { + entry: zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: time.Time{}, + Message: "simple written line", + }, + }, + { + entry: zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: time.Time{}, + Message: "another line split on multi writes", + }, 
+ }, + }, + }, + { + Name: "json log line split", + LogLevel: zapcore.DebugLevel, + LogSource: logSourceStdout, + Config: component.CommandLogSpec{ + LevelKey: "log.level", + TimeKey: "@timestamp", + TimeFormat: time.RFC3339Nano, + MessageKey: "message", + IgnoreKeys: []string{"ignore"}, + }, + Lines: []string{ + `{"@timestamp": "2009-11-10T23:00:00Z", "log.level": "debug", "message": "message`, + ` field", "string": "extra", "int": 50, "ignore": "other"}`, + "\n", + }, + Wrote: []wrote{ + { + entry: zapcore.Entry{ + Level: zapcore.DebugLevel, + Time: parseTime("2009-11-10T23:00:00Z", time.RFC3339Nano), + Message: "message field", + }, + fields: []zapcore.Field{ + zap.String("string", "extra"), + zap.Float64("int", 50), + }, + }, + }, + }, + { + Name: "invalid JSON line", + LogLevel: zapcore.DebugLevel, + LogSource: logSourceStdout, + Lines: []string{ + `{"broken": json`, + "\n", + }, + Wrote: []wrote{ + { + entry: zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: time.Time{}, + Message: `{"broken": json`, + }, + }, + }, + }, + { + Name: "JSON drop log due to level", + LogLevel: zapcore.WarnLevel, + LogSource: logSourceStdout, + Lines: []string{ + `{"log.level": "info", "message": "not logged"}`, + "\n", + }, + Wrote: []wrote{}, + }, + { + Name: "JSON keep log due to level", + LogLevel: zapcore.InfoLevel, + LogSource: logSourceStdout, + Lines: []string{ + `{"@timestamp": "2009-11-10T23:00:00Z", "log.level": "info", "message": "message"}`, + "\n", + }, + Config: component.CommandLogSpec{ + LevelKey: "log.level", + TimeKey: "@timestamp", + TimeFormat: time.RFC3339Nano, + MessageKey: "message", + }, + Wrote: []wrote{ + { + entry: zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: parseTime("2009-11-10T23:00:00Z", time.RFC3339Nano), + Message: "message", + }, + fields: []zapcore.Field{}, + }, + }, + }, + { + Name: "JSON drop unit specific log", + LogLevel: zapcore.ErrorLevel, + UnitLevels: map[string]zapcore.Level{ + "my-unit-id": zapcore.DebugLevel, + }, + LogSource: logSourceStdout, + Lines: []string{ + `{"@timestamp": "2009-11-10T23:00:00Z", "log.level": "info", "message": "info message", "unit.id": "my-unit-id"}`, + "\n", + `{"@timestamp": "2009-11-10T23:00:00Z", "log.level": "debug", "message": "debug message", "unit.id": "my-unit-id"}`, + "\n", + `{"@timestamp": "2009-11-10T23:00:00Z", "log.level": "info", "message": "info message", "unit": {"id": "my-unit-id"}}`, + "\n", + `{"@timestamp": "2009-11-10T23:00:00Z", "log.level": "info", "message": "dropped", "unit": {"id": "other-unit-id"}}`, + "\n", + `{"@timestamp": "2009-11-10T23:00:00Z", "log.level": "info", "message": "dropped"}`, + "\n", + }, + Config: component.CommandLogSpec{ + LevelKey: "log.level", + TimeKey: "@timestamp", + TimeFormat: time.RFC3339Nano, + MessageKey: "message", + }, + Wrote: []wrote{ + { + entry: zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: parseTime("2009-11-10T23:00:00Z", time.RFC3339Nano), + Message: "info message", + }, + fields: []zapcore.Field{ + zap.String("unit.id", "my-unit-id"), + }, + }, + { + entry: zapcore.Entry{ + Level: zapcore.DebugLevel, + Time: parseTime("2009-11-10T23:00:00Z", time.RFC3339Nano), + Message: "debug message", + }, + fields: []zapcore.Field{ + zap.String("unit.id", "my-unit-id"), + }, + }, + { + entry: zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: parseTime("2009-11-10T23:00:00Z", time.RFC3339Nano), + Message: "info message", + }, + fields: []zapcore.Field{ + zap.Any("unit", map[string]interface{}{ + "id": "my-unit-id", + }), + }, + }, + }, + }, + } + + for _, scenario 
:= range scenarios { + t.Run(scenario.Name, func(t *testing.T) { + c := &captureCore{} + w := newLogWriter(c, scenario.Config, scenario.LogLevel, scenario.UnitLevels, scenario.LogSource) + for _, line := range scenario.Lines { + l := len([]byte(line)) + c, err := w.Write([]byte(line)) + require.NoError(t, err) + require.Equal(t, l, c) + } + require.Len(t, c.wrote, len(scenario.Wrote)) + for i := 0; i < len(scenario.Wrote); i++ { + e := scenario.Wrote[i] + o := c.wrote[i] + if e.entry.Time.IsZero() { + // can't ensure times match; set it to observed before ensuring its equal + e.entry.Time = o.entry.Time + } + assert.Equal(t, e.entry, o.entry) + + // ensure the fields are in the same order (doesn't really matter for logging; but test cares) + if len(e.fields) > 0 { + sortFields(e.fields) + } + if len(o.fields) > 0 { + sortFields(o.fields) + } + assert.EqualValues(t, e.fields, o.fields) + } + }) + } +} + +type captureCore struct { + wrote []wrote +} + +func (c *captureCore) Write(entry zapcore.Entry, fields []zapcore.Field) error { + c.wrote = append(c.wrote, wrote{ + entry: entry, + fields: fields, + }) + return nil +} + +func parseTime(t string, format string) time.Time { + v, err := time.Parse(format, t) + if err != nil { + panic(err) + } + return v +} + +func sortFields(fields []zapcore.Field) { + sort.Slice(fields, func(i, j int) bool { + return fields[i].Key < fields[j].Key + }) +} diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go index 0c4befc5e2f..7e390a1e744 100644 --- a/pkg/component/runtime/manager.go +++ b/pkg/component/runtime/manager.go @@ -29,6 +29,7 @@ import ( "github.com/elastic/elastic-agent-libs/atomic" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/core/authority" "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -54,6 +55,7 @@ var ( type ComponentComponentState struct { Component component.Component `yaml:"component"` State ComponentState `yaml:"state"` + LegacyPID string `yaml:"-"` // To propagate PID for the /processes, and yes, it was a string } // ComponentUnitDiagnosticRequest used to request diagnostics from specific unit. @@ -80,6 +82,7 @@ type Manager struct { agentInfo *info.AgentInfo tracer *apm.Tracer monitor MonitoringManager + grpcConfig *configuration.GRPCConfig netMx sync.RWMutex listener net.Listener @@ -103,7 +106,7 @@ type Manager struct { } // NewManager creates a new manager. 
-func NewManager(logger *logger.Logger, listenAddr string, agentInfo *info.AgentInfo, tracer *apm.Tracer, monitor MonitoringManager) (*Manager, error) { +func NewManager(logger *logger.Logger, listenAddr string, agentInfo *info.AgentInfo, tracer *apm.Tracer, monitor MonitoringManager, grpcConfig *configuration.GRPCConfig) (*Manager, error) { ca, err := authority.NewCA() if err != nil { return nil, err @@ -120,6 +123,7 @@ func NewManager(logger *logger.Logger, listenAddr string, agentInfo *info.AgentI subscriptions: make(map[string][]*Subscription), errCh: make(chan error), monitor: monitor, + grpcConfig: grpcConfig, } return m, nil } @@ -153,9 +157,13 @@ func (m *Manager) Run(ctx context.Context) error { server = grpc.NewServer( grpc.UnaryInterceptor(apmInterceptor), grpc.Creds(creds), + grpc.MaxRecvMsgSize(m.grpcConfig.MaxMsgSize), ) } else { - server = grpc.NewServer(grpc.Creds(creds)) + server = grpc.NewServer( + grpc.Creds(creds), + grpc.MaxRecvMsgSize(m.grpcConfig.MaxMsgSize), + ) } m.netMx.Lock() m.server = server @@ -171,7 +179,7 @@ func (m *Manager) Run(ctx context.Context) error { for { err := server.Serve(lis) if err != nil { - m.logger.Errorf("control protocol failed: %w", err) + m.logger.Errorf("control protocol failed: %s", err) } if ctx.Err() != nil { // context has an error don't start again @@ -284,9 +292,21 @@ func (m *Manager) State() []ComponentComponentState { states := make([]ComponentComponentState, 0, len(m.current)) for _, crs := range m.current { crs.latestMx.RLock() + var legacyPID string + if crs.runtime != nil { + if commandRuntime, ok := crs.runtime.(*CommandRuntime); ok { + if commandRuntime != nil { + procInfo := commandRuntime.proc + if procInfo != nil { + legacyPID = fmt.Sprint(commandRuntime.proc.PID) + } + } + } + } states = append(states, ComponentComponentState{ Component: crs.currComp, State: crs.latestState.Copy(), + LegacyPID: legacyPID, }) crs.latestMx.RUnlock() } diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index a26c9f037a4..2e97fb22121 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -25,6 +25,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -62,7 +63,7 @@ func TestManager_SimpleComponentErr(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr(), configuration.DefaultGRPCConfig()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -164,7 +165,7 @@ func TestManager_FakeInput_StartStop(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr(), configuration.DefaultGRPCConfig()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -289,7 +290,7 @@ func TestManager_FakeInput_BadUnitToGood(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, 
apmtest.DiscardTracer, newTestMonitoringMgr()) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr(), configuration.DefaultGRPCConfig()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -460,7 +461,7 @@ func TestManager_FakeInput_GoodUnitToBad(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr(), configuration.DefaultGRPCConfig()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -615,7 +616,7 @@ func TestManager_FakeInput_Configure(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr(), configuration.DefaultGRPCConfig()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -741,7 +742,7 @@ func TestManager_FakeInput_RemoveUnit(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr(), configuration.DefaultGRPCConfig()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -899,7 +900,7 @@ func TestManager_FakeInput_ActionState(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr(), configuration.DefaultGRPCConfig()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -1029,7 +1030,7 @@ func TestManager_FakeInput_Restarts(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr(), configuration.DefaultGRPCConfig()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -1168,7 +1169,7 @@ func TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr(), configuration.DefaultGRPCConfig()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -1289,7 +1290,7 @@ func TestManager_FakeInput_InvalidAction(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr(), configuration.DefaultGRPCConfig()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -1413,7 +1414,7 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, 
newTestMonitoringMgr()) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr(), configuration.DefaultGRPCConfig()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -1625,7 +1626,7 @@ func TestManager_FakeInput_LogLevel(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr(), configuration.DefaultGRPCConfig()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -1777,7 +1778,7 @@ func TestManager_FakeShipper(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr(), configuration.DefaultGRPCConfig()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -2126,5 +2127,5 @@ type testMonitoringManager struct{} func newTestMonitoringMgr() *testMonitoringManager { return &testMonitoringManager{} } func (*testMonitoringManager) EnrichArgs(_ string, _ string, args []string) []string { return args } -func (*testMonitoringManager) Prepare() error { return nil } +func (*testMonitoringManager) Prepare(_ string) error { return nil } func (*testMonitoringManager) Cleanup(string) error { return nil } diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go index 0ed1b46c26c..aa780a002e5 100644 --- a/pkg/component/runtime/runtime.go +++ b/pkg/component/runtime/runtime.go @@ -60,7 +60,7 @@ func NewComponentRuntime(comp component.Component, logger *logger.Logger, monito } if comp.InputSpec != nil { if comp.InputSpec.Spec.Command != nil { - return NewCommandRuntime(comp, monitor) + return NewCommandRuntime(comp, logger, monitor) } if comp.InputSpec.Spec.Service != nil { return NewServiceRuntime(comp, logger) @@ -69,7 +69,7 @@ func NewComponentRuntime(comp component.Component, logger *logger.Logger, monito } if comp.ShipperSpec != nil { if comp.ShipperSpec.Spec.Command != nil { - return NewCommandRuntime(comp, monitor) + return NewCommandRuntime(comp, logger, monitor) } return nil, errors.New("components for shippers can only support command runtime") } diff --git a/pkg/component/runtime/runtime_comm.go b/pkg/component/runtime/runtime_comm.go index 2bc2e297179..57f1032db5b 100644 --- a/pkg/component/runtime/runtime_comm.go +++ b/pkg/component/runtime/runtime_comm.go @@ -34,6 +34,8 @@ type Communicator interface { WriteConnInfo(w io.Writer, services ...client.Service) error // CheckinExpected sends the expected state to the component. CheckinExpected(expected *proto.CheckinExpected) + // ClearPendingCheckinExpected clears any pending checkin expected messages. + ClearPendingCheckinExpected() // CheckinObserved receives the observed state from the component.
CheckinObserved() <-chan *proto.CheckinObserved } @@ -144,10 +146,7 @@ func (c *runtimeComm) CheckinExpected(expected *proto.CheckinExpected) { c.checkinExpectedLock.Lock() // Empty the channel - select { - case <-c.checkinExpected: - default: - } + c.ClearPendingCheckinExpected() // Put the new expected state in c.checkinExpected <- expected @@ -155,6 +154,13 @@ func (c *runtimeComm) CheckinExpected(expected *proto.CheckinExpected) { c.checkinExpectedLock.Unlock() } +func (c *runtimeComm) ClearPendingCheckinExpected() { + select { + case <-c.checkinExpected: + default: + } +} + func (c *runtimeComm) CheckinObserved() <-chan *proto.CheckinObserved { return c.checkinObserved } diff --git a/pkg/component/runtime/service.go b/pkg/component/runtime/service.go index 41cf1b517cc..a032d9abd06 100644 --- a/pkg/component/runtime/service.go +++ b/pkg/component/runtime/service.go @@ -114,6 +114,7 @@ func (s *ServiceRuntime) Run(ctx context.Context, comm Communicator) (err error) // Initial state on start lastCheckin = time.Time{} missedCheckins = 0 + comm.ClearPendingCheckinExpected() checkinTimer.Stop() cisStop() diff --git a/pkg/component/runtime/state.go b/pkg/component/runtime/state.go index 832b7548ba7..0042b9ccb70 100644 --- a/pkg/component/runtime/state.go +++ b/pkg/component/runtime/state.go @@ -8,6 +8,8 @@ import ( "errors" "reflect" + gproto "google.golang.org/protobuf/proto" + "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent-client/v7/pkg/proto" "github.com/elastic/elastic-agent/pkg/component" @@ -120,7 +122,7 @@ func (s *ComponentState) syncExpected(comp *component.Component) bool { existing.logLevel = unit.LogLevel changed = true } - if !reflect.DeepEqual(existing.config, unit.Config) { + if !gproto.Equal(existing.config, unit.Config) { existing.config = unit.Config existing.configStateIdx++ changed = true diff --git a/pkg/component/spec.go b/pkg/component/spec.go index e7ec47a5811..ac19b0ba51e 100644 --- a/pkg/component/spec.go +++ b/pkg/component/spec.go @@ -75,9 +75,12 @@ type RuntimePreventionSpec struct { // CommandSpec is the specification for an input that executes as a subprocess. type CommandSpec struct { - Args []string `config:"args,omitempty" yaml:"args,omitempty"` - Env []CommandEnvSpec `config:"env,omitempty" yaml:"env,omitempty"` - Timeouts CommandTimeoutSpec `config:"timeouts" yaml:"timeouts"` + Args []string `config:"args,omitempty" yaml:"args,omitempty"` + Env []CommandEnvSpec `config:"env,omitempty" yaml:"env,omitempty"` + Timeouts CommandTimeoutSpec `config:"timeouts" yaml:"timeouts"` + Log CommandLogSpec `config:"log" yaml:"log"` + RestartMonitoringPeriod time.Duration `config:"restart_monitoring_period,omitempty" yaml:"restart_monitoring_period,omitempty"` + MaxRestartsPerPeriod int `config:"maximum_restarts_per_period,omitempty" yaml:"maximum_restarts_per_period,omitempty"` } // CommandEnvSpec is the specification that defines environment variables that will be set to execute the subprocess. @@ -100,6 +103,23 @@ func (t *CommandTimeoutSpec) InitDefaults() { t.Stop = 30 * time.Second } +// CommandLogSpec is the log specification for subprocess. 
+type CommandLogSpec struct { + LevelKey string `config:"level_key" yaml:"level_key"` + TimeKey string `config:"time_key" yaml:"time_key"` + TimeFormat string `config:"time_format" yaml:"time_format"` + MessageKey string `config:"message_key" yaml:"message_key"` + IgnoreKeys []string `config:"ignore_keys" yaml:"ignore_keys"` +} + +// InitDefaults initializes the defaults for the command log specification. +func (t *CommandLogSpec) InitDefaults() { + t.LevelKey = "log.level" + t.TimeKey = "@timestamp" + t.TimeFormat = "2006-01-02T15:04:05.000Z0700" + t.MessageKey = "message" +} + // ServiceTimeoutSpec is the timeout specification for subprocess. type ServiceTimeoutSpec struct { Checkin time.Duration `config:"checkin" yaml:"checkin"` diff --git a/pkg/core/logger/logger.go b/pkg/core/logger/logger.go index 049fd271038..1bb01db4c01 100644 --- a/pkg/core/logger/logger.go +++ b/pkg/core/logger/logger.go @@ -26,6 +26,9 @@ const agentName = "elastic-agent" const iso8601Format = "2006-01-02T15:04:05.000Z0700" +// Level is the log level used by the agent. +type Level = logp.Level + // DefaultLogLevel used in agent and its processes. const DefaultLogLevel = logp.InfoLevel @@ -58,6 +61,13 @@ func NewFromConfig(name string, cfg *Config, logInternal bool) (*Logger, error) return new(name, cfg, logInternal) } +// NewWithoutConfig returns a new logger without a configuration. +// +// Use only when a clean logger is needed, and it is known that the logging configuration has already been performed. +func NewWithoutConfig(name string) *Logger { + return logp.NewLogger(name) +} + func new(name string, cfg *Config, logInternal bool) (*Logger, error) { commonCfg, err := toCommonConfig(cfg) if err != nil { @@ -98,6 +108,11 @@ func toCommonConfig(cfg *Config) (*config.C, error) { return commonLogp, nil } +// SetLevel changes the overall log level of the global logger. +func SetLevel(lvl logp.Level) { + logp.SetLevel(lvl.ZapLevel()) +} + // DefaultLoggingConfig returns default configuration for agent logging.
func DefaultLoggingConfig() *Config { cfg := logp.DefaultConfig(logp.DefaultEnvironment) @@ -106,6 +121,7 @@ func DefaultLoggingConfig() *Config { cfg.ToFiles = true cfg.Files.Path = paths.Logs() cfg.Files.Name = agentName + cfg.Files.MaxSize = 20 * 1024 * 1024 return &cfg } diff --git a/specs/apm-server.spec.yml b/specs/apm-server.spec.yml index e646e9facce..4f8017544d1 100644 --- a/specs/apm-server.spec.yml +++ b/specs/apm-server.spec.yml @@ -1,23 +1,27 @@ -version: 2 -inputs: - - name: apm - description: "APM Server" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: - - elasticsearch - - kafka - - logstash - - redis - command: - args: - - "-E" - - "management.enabled=true" - - "-E" - - "gc_percent=${APMSERVER_GOGC:100}" +version: 2 +inputs: + - name: apm + description: "APM Server" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + - kafka + - logstash + - redis + command: + args: + - "-E" + - "management.enabled=true" + - "-E" + - "gc_percent=${APMSERVER_GOGC:100}" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" diff --git a/specs/auditbeat.spec.yml b/specs/auditbeat.spec.yml index f8c46a96873..00b374896dd 100644 --- a/specs/auditbeat.spec.yml +++ b/specs/auditbeat.spec.yml @@ -1,43 +1,49 @@ -version: 2 -inputs: - - name: audit/auditd - description: "Auditd" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${AUDITBEAT_GOGC:100}" - - "-E" - - "auditbeat.config.modules.enabled=false" - - name: audit/file_integrity - description: "Audit File Integrity" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: audit/system - description: "Audit System" - platforms: *platforms - outputs: *outputs - command: - args: *args +version: 2 +inputs: + - name: audit/auditd + description: "Auditd" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + command: &command + restart_monitoring_period: 5s + maximum_restarts_per_period: 1 + timeouts: + restart: 1s + args: + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "management.restart_on_output_change=true" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" + - "-E" + - "gc_percent=${AUDITBEAT_GOGC:100}" + - "-E" + - "auditbeat.config.modules.enabled=false" + - name: audit/file_integrity + description: "Audit File Integrity" + platforms: *platforms + outputs: *outputs + command: *command + - name: audit/system + description: "Audit System" + platforms: *platforms + outputs: *outputs + command: *command diff --git a/specs/cloud-defend.spec.yml b/specs/cloud-defend.spec.yml new file mode 100644 index 00000000000..83cd0d84983 --- /dev/null +++ b/specs/cloud-defend.spec.yml @@ -0,0 +1,14 @@ +version: 2 +inputs: + - name: cloud_defend/control + description: 
"Defend for containers" + platforms: &platforms + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + command: + args: &args + - "run" + - "--fleet-managed" + - "--process-managed" diff --git a/specs/cloudbeat.spec.yml b/specs/cloudbeat.spec.yml index 1ecbe47e330..fca26ec2acb 100644 --- a/specs/cloudbeat.spec.yml +++ b/specs/cloudbeat.spec.yml @@ -1,39 +1,52 @@ -version: 2 -inputs: - - name: cloudbeat - description: "Cloudbeat" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "management.enabled=true" - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "gc_percent=${CLOUDBEAT_GOGC:100}" - - name: cloudbeat/cis_k8s - description: "CIS Kubernetes monitoring" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: cloudbeat/cis_eks - description: "CIS elastic Kubernetes monitoring" - platforms: *platforms - outputs: *outputs - command: - args: *args \ No newline at end of file +version: 2 +inputs: + - name: cloudbeat + description: "Cloudbeat" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + command: &command + restart_monitoring_period: 5s + maximum_restarts_per_period: 1 + timeouts: + restart: 1s + args: + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "management.restart_on_output_change=true" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" + - "-E" + - "gc_percent=${CLOUDBEAT_GOGC:100}" + - name: cloudbeat/cis_k8s + description: "CIS Kubernetes monitoring" + platforms: *platforms + outputs: *outputs + command: *command + - name: cloudbeat/cis_eks + description: "CIS elastic Kubernetes monitoring" + platforms: *platforms + outputs: *outputs + command: *command + - name: cloudbeat/cis_aws + description: "CIS AWS monitoring" + platforms: *platforms + outputs: *outputs + command: *command diff --git a/specs/endpoint-security.spec.yml b/specs/endpoint-security.spec.yml index 69827c68e75..42e36d92b00 100644 --- a/specs/endpoint-security.spec.yml +++ b/specs/endpoint-security.spec.yml @@ -5,8 +5,11 @@ inputs: platforms: - linux/amd64 - linux/arm64 + - container/amd64 + - container/arm64 outputs: - elasticsearch + - logstash runtime: preventions: - condition: ${runtime.user.root} == false @@ -46,6 +49,7 @@ inputs: - darwin/arm64 outputs: - elasticsearch + - logstash service: cport: 6788 log: @@ -57,6 +61,7 @@ inputs: - windows/amd64 outputs: - elasticsearch + - logstash runtime: preventions: - condition: ${runtime.user.root} == false @@ -65,4 +70,4 @@ inputs: cport: 6788 log: path: "C:\\Program Files\\Elastic\\Endpoint\\state\\log\\endpoint-*.log" - operations: *operations \ No newline at end of file + operations: *operations diff --git a/specs/filebeat.spec.yml b/specs/filebeat.spec.yml index e18fcbb1e65..14481950307 100644 --- a/specs/filebeat.spec.yml +++ b/specs/filebeat.spec.yml @@ -17,8 +17,12 @@ inputs: - redis shippers: &shippers - shipper - command: - args: &args + command: &command + restart_monitoring_period: 5s + maximum_restarts_per_period: 1 + timeouts: + restart: 1s + args: - "-E" - "setup.ilm.enabled=false" - 
"-E" @@ -26,7 +30,11 @@ inputs: - "-E" - "management.enabled=true" - "-E" - - "logging.level=debug" + - "management.restart_on_output_change=true" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" - "-E" - "gc_percent=${FILEBEAT_GOGC:100}" - "-E" @@ -36,36 +44,37 @@ inputs: platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: azure-eventhub description: "Azure Eventhub" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: cel description: "Common Expression Language Input" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: cloudfoundry description: "PCF Cloudfoundry" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command + - name: cometd + description: "CometD input" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command - name: container description: "Container logs" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: docker aliases: - log/docker @@ -73,43 +82,37 @@ inputs: platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: gcp-pubsub description: "GCP Pub-Sub" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: http_endpoint description: "HTTP Endpoint" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: httpjson description: "HTTP JSON Endpoint" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: journald description: "Journald" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: kafka description: "Kafka" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: log aliases: - logfile @@ -118,29 +121,25 @@ inputs: platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: mqtt description: "MQTT" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: netflow description: "Netflow" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: o365audit description: "Office 365 Audit" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: redis aliases: - log/redis_slowlog @@ -148,8 +147,7 @@ inputs: platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: syslog aliases: - log/syslog @@ -157,8 +155,7 @@ inputs: platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: tcp aliases: - event/tcp @@ -166,8 +163,7 @@ inputs: platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: udp aliases: - event/udp @@ -175,26 +171,22 @@ inputs: platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: unix description: "Unix Socket" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: winlog description: "Winlog" 
platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: filestream description: "Filestream" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command diff --git a/specs/fleet-server.spec.yml b/specs/fleet-server.spec.yml index f1e760efe8b..8eff2122160 100644 --- a/specs/fleet-server.spec.yml +++ b/specs/fleet-server.spec.yml @@ -16,4 +16,6 @@ inputs: args: - "--agent-mode" - "-E" + - "logging.level=debug" + - "-E" - "logging.to_stderr=true" diff --git a/specs/heartbeat.spec.yml b/specs/heartbeat.spec.yml index ba6a08934b8..b7cc46c490a 100644 --- a/specs/heartbeat.spec.yml +++ b/specs/heartbeat.spec.yml @@ -1,6 +1,6 @@ version: 2 inputs: - - name: synthetics/synthetics + - name: synthetics/browser description: "Synthetics Browser Monitor" platforms: &platforms - linux/amd64 @@ -12,8 +12,12 @@ inputs: - container/arm64 outputs: &outputs - elasticsearch - command: - args: &args + command: &command + restart_monitoring_period: 5s + maximum_restarts_per_period: 1 + timeouts: + restart: 1s + args: - "-E" - "setup.ilm.enabled=false" - "-E" @@ -21,24 +25,25 @@ inputs: - "-E" - "management.enabled=true" - "-E" - - "logging.level=debug" + - "management.restart_on_output_change=true" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" - "-E" - "gc_percent=${HEARTBEAT_GOGC:100}" - name: synthetics/http description: "Synthetics HTTP Monitor" platforms: *platforms outputs: *outputs - command: - args: *args + command: *command - name: synthetics/icmp description: "Synthetics ICMP Monitor" platforms: *platforms outputs: *outputs - command: - args: *args + command: *command - name: synthetics/tcp description: "Synthetics TCP Monitor" platforms: *platforms outputs: *outputs - command: - args: *args + command: *command diff --git a/specs/metricbeat.spec.yml b/specs/metricbeat.spec.yml index b7c88ad4864..7b4fec07054 100644 --- a/specs/metricbeat.spec.yml +++ b/specs/metricbeat.spec.yml @@ -17,8 +17,12 @@ inputs: - redis shippers: &shippers - shipper - command: - args: &args + command: &command + restart_monitoring_period: 5s + maximum_restarts_per_period: 1 + timeouts: + restart: 1s + args: - "-E" - "setup.ilm.enabled=false" - "-E" @@ -26,7 +30,11 @@ inputs: - "-E" - "management.enabled=true" - "-E" - - "logging.level=debug" + - "management.restart_on_output_change=true" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" - "-E" - "gc_percent=${METRICBEAT_GOGC:100}" - "-E" @@ -36,152 +44,232 @@ inputs: platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: elasticsearch/metrics description: "Elasticsearch metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: kibana/metrics description: "Kibana metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: kubernetes/metrics description: "Kubernetes metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: linux/metrics description: "Linux metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: logstash/metrics description: "Logstash metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: mongodb/metrics description: "Mongodb metrics" 
platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: mysql/metrics description: "MySQL metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: postgresql/metrics description: "PostgreSQL metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: redis/metrics description: "Redis metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: system/metrics description: "System metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: uwsgi/metrics description: "UWSGI metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: windows/metrics description: "Windows metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: aws/metrics description: "AWS metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: awsfargate/metrics description: "AWS Fargate metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: azure/metrics description: "Azure metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: cloudfoundry/metrics description: "PCF Cloudfoundry metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: containerd/metrics description: "Containerd metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: mssql/metrics description: "Microsoft SQL Server metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: oracle/metrics description: "Oracle Database metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: syncgateway/metrics description: "Couchbase Sync Gateway metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command - name: http/metrics description: "HTTP metrics" platforms: *platforms outputs: *outputs shippers: *shippers - command: - args: *args + command: *command + - name: activemq/metrics + description: "ActiveMQ metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: apache/metrics + description: "Apache metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: etcd/metrics + description: "Etcd metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: gcp/metrics + description: "GCP metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: haproxy/metrics + description: "HAProxy metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: iis/metrics + description: "IIS metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: jolokia/metrics + description: "Jolokia metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: 
kafka/metrics + description: "Kafka metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: nats/metrics + description: "NATS metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: nginx/metrics + description: "NGINX metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: prometheus/metrics + description: "Prometheus metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: rabbitmq/metrics + description: "RabbitMQ metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: sql/metrics + description: "SQL metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: stan/metrics + description: "Stan metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: traefik/metrics + description: "Traefik metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: vsphere/metrics + description: "VSphere metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command + - name: zookeeper/metrics + description: "ZooKeeper metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: *command diff --git a/specs/osquerybeat.spec.yml b/specs/osquerybeat.spec.yml index 31edb9a3edb..244eb1fe839 100644 --- a/specs/osquerybeat.spec.yml +++ b/specs/osquerybeat.spec.yml @@ -1,26 +1,35 @@ -version: 2 -inputs: - - name: osquery - description: "Osquery" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: - - elasticsearch - command: - args: - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${OSQUERYBEAT_GOGC:100}" +version: 2 +inputs: + - name: osquery + description: "Osquery" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + - logstash + command: + restart_monitoring_period: 5s + maximum_restarts_per_period: 1 + timeouts: + restart: 1s + args: + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "management.restart_on_output_change=true" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" + - "-E" + - "gc_percent=${OSQUERYBEAT_GOGC:100}" diff --git a/specs/packetbeat.spec.yml b/specs/packetbeat.spec.yml index 0519078cac8..13e0683b5ca 100644 --- a/specs/packetbeat.spec.yml +++ b/specs/packetbeat.spec.yml @@ -1,29 +1,37 @@ -version: 2 -inputs: - - name: packet - description: "Packet Capture" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: - - elasticsearch - - kafka - - logstash - - redis - command: - args: - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${PACKETBEAT_GOGC:100}" +version: 2 +inputs: + - name: packet + description: "Packet Capture" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 
+ - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + - kafka + - logstash + - redis + command: + restart_monitoring_period: 5s + maximum_restarts_per_period: 1 + timeouts: + restart: 1s + args: + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "management.restart_on_output_change=true" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" + - "-E" + - "gc_percent=${PACKETBEAT_GOGC:100}" diff --git a/testing/environments/Dockerfile b/testing/environments/Dockerfile index 0543985dadd..e2fe35768ba 100644 --- a/testing/environments/Dockerfile +++ b/testing/environments/Dockerfile @@ -1,6 +1,6 @@ # Basic debian file with curl, wget and nano installed to fetch files # an update config files -FROM debian:latest +FROM debian:11.5 MAINTAINER Nicolas Ruflin RUN apt-get update && \ diff --git a/testing/environments/docker/kafka/Dockerfile b/testing/environments/docker/kafka/Dockerfile index 1a5e58836bc..eba9ef58a66 100644 --- a/testing/environments/docker/kafka/Dockerfile +++ b/testing/environments/docker/kafka/Dockerfile @@ -1,4 +1,4 @@ -FROM debian:stretch +FROM debian:11.5 ENV KAFKA_HOME /kafka # The advertised host is kafka. This means it will not work if container is started locally and connected from localhost to it diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index edcbb7c83c7..f5ccbb0a605 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-9a0f87a9-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.7.0-d6e7f5ec-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-9a0f87a9-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.7.0-d6e7f5ec-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" diff --git a/version/docs/version.asciidoc b/version/docs/version.asciidoc index 28c84d61f3d..e274f91f10e 100644 --- a/version/docs/version.asciidoc +++ b/version/docs/version.asciidoc @@ -1,6 +1,6 @@ :stack-version: 8.3.0 :doc-branch: main -:go-version: 1.18.8 +:go-version: 1.18.9 :release-state: unreleased :python: 3.7 :docker: 1.12 diff --git a/version/version.go b/version/version.go index 60029093c35..157788f4889 100644 --- a/version/version.go +++ b/version/version.go @@ -4,4 +4,4 @@ package version -const defaultBeatVersion = "8.6.0" +const defaultBeatVersion = "8.7.0"
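For reviewers: a minimal, standalone sketch (not part of the patch) of the restart-suppression behavior introduced above. It mirrors, in slightly simplified form, the newRateLimiter helper added to pkg/component/runtime/command.go and the restart_monitoring_period: 5s / maximum_restarts_per_period: 1 settings added to the beat spec files; the main wrapper and printed messages are illustrative assumptions only.

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// newRateLimiter converts maximum_restarts_per_period / restart_monitoring_period
// into a token-bucket limiter; nil disables restart monitoring.
func newRateLimiter(period time.Duration, maxPerPeriod int) *rate.Limiter {
	if period <= 0 || maxPerPeriod <= 0 {
		return nil
	}
	perSecond := float64(maxPerPeriod) / period.Seconds()
	return rate.NewLimiter(rate.Limit(perSecond), maxPerPeriod)
}

func main() {
	// Values used by the updated specs: one restart allowed per 5s window.
	limiter := newRateLimiter(5*time.Second, 1)

	for i := 1; i <= 3; i++ {
		if limiter.Allow() {
			// Within budget: the runtime reports STOPPED and lets the component restart.
			fmt.Printf("exit %d: restart suppressed (reported as STOPPED)\n", i)
		} else {
			// Budget exhausted: the runtime reports FAILED.
			fmt.Printf("exit %d: reported as FAILED\n", i)
		}
	}
}

With the burst set to maximum_restarts_per_period, a single crash inside the monitoring window is tolerated and retried before a FAILED state is surfaced.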