diff --git a/.cloudbuild/ci/doc-tests.yaml b/.cloudbuild/ci/doc-tests.yaml index 19928b9432dbd..b63204376c76a 100644 --- a/.cloudbuild/ci/doc-tests.yaml +++ b/.cloudbuild/ci/doc-tests.yaml @@ -1,11 +1,9 @@ steps: - name: quay.io/gravitational/next:main id: docs-test - env: - - WITH_EXTERNAL_LINKS=true entrypoint: /bin/bash dir: /src - args: - - -c - - ln -s /workspace /src/content && yarn markdown-lint-external-links + args: + - -c + - ln -s /workspace /src/content && yarn markdown-lint timeout: 10m diff --git a/.gitignore b/.gitignore index 5c34de0dafb44..b8f41bf0d10a5 100644 --- a/.gitignore +++ b/.gitignore @@ -75,3 +75,7 @@ ssh.config /tctl /teleport /tsh + +# Go workspace files +go.work +go.work.sum \ No newline at end of file diff --git a/Makefile b/Makefile index f05b19d9e1f7f..ac5d3c1b15f82 100644 --- a/Makefile +++ b/Makefile @@ -849,6 +849,11 @@ docker-binaries: clean enter: make -C build.assets enter +# Interactively enters a Docker container, as root (which you can build and run Teleport inside of) +.PHONY:enter-root +enter-root: + make -C build.assets enter-root + # Interactively enters a Docker container (which you can build and run Teleport inside of). # Similar to `enter`, but uses the centos7 container. .PHONY:enter/centos7 diff --git a/api/client/webclient/webconfig.go b/api/client/webclient/webconfig.go index 31bac4b8bb195..e67ca2b9004eb 100644 --- a/api/client/webclient/webconfig.go +++ b/api/client/webclient/webconfig.go @@ -21,18 +21,21 @@ import "github.com/gravitational/teleport/api/constants" const ( // WebConfigAuthProviderOIDCType is OIDC provider type WebConfigAuthProviderOIDCType = "oidc" - // WebConfigAuthProviderOIDCURL is OIDC webapi endpoint - WebConfigAuthProviderOIDCURL = "/v1/webapi/oidc/login/web?redirect_url=:redirect&connector_id=:providerName" + // WebConfigAuthProviderOIDCURL is OIDC webapi endpoint. + // redirect_url MUST be the last query param, see the comment in parseSSORequestParams for an explanation. 
+ WebConfigAuthProviderOIDCURL = "/v1/webapi/oidc/login/web?connector_id=:providerName&redirect_url=:redirect" // WebConfigAuthProviderSAMLType is SAML provider type WebConfigAuthProviderSAMLType = "saml" - // WebConfigAuthProviderSAMLURL is SAML webapi endpoint - WebConfigAuthProviderSAMLURL = "/v1/webapi/saml/sso?redirect_url=:redirect&connector_id=:providerName" + // WebConfigAuthProviderSAMLURL is SAML webapi endpoint. + // redirect_url MUST be the last query param, see the comment in parseSSORequestParams for an explanation. + WebConfigAuthProviderSAMLURL = "/v1/webapi/saml/sso?connector_id=:providerName&redirect_url=:redirect" // WebConfigAuthProviderGitHubType is GitHub provider type WebConfigAuthProviderGitHubType = "github" // WebConfigAuthProviderGitHubURL is GitHub webapi endpoint - WebConfigAuthProviderGitHubURL = "/v1/webapi/github/login/web?redirect_url=:redirect&connector_id=:providerName" + // redirect_url MUST be the last query param, see the comment in parseSSORequestParams for an explanation. + WebConfigAuthProviderGitHubURL = "/v1/webapi/github/login/web?connector_id=:providerName&redirect_url=:redirect" ) // WebConfig is web application configuration served by the backend to be used in frontend apps. diff --git a/api/utils/sshutils/conn.go b/api/utils/sshutils/conn.go index bee97420e4a1a..a4abec1fbf8ec 100644 --- a/api/utils/sshutils/conn.go +++ b/api/utils/sshutils/conn.go @@ -59,10 +59,12 @@ func ConnectProxyTransport(sconn ssh.Conn, req *DialReq, exclusive bool) (*ChCon channel, discard, err := sconn.OpenChannel(constants.ChanTransport, nil) if err != nil { - ssh.DiscardRequests(discard) return nil, false, trace.Wrap(err) } + // DiscardRequests will return when the channel or underlying connection is closed. 
+ go ssh.DiscardRequests(discard) + // Send a special SSH out-of-band request called "teleport-transport" // the agent on the other side will create a new TCP/IP connection to // 'addr' on its network and will start proxying that connection over diff --git a/api/utils/sshutils/conn_test.go b/api/utils/sshutils/conn_test.go new file mode 100644 index 0000000000000..a644d34c53d1c --- /dev/null +++ b/api/utils/sshutils/conn_test.go @@ -0,0 +1,170 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sshutils + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "net" + "testing" + "time" + + "github.com/gravitational/teleport/api/constants" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/ssh" +) + +type server struct { + listener net.Listener + config *ssh.ServerConfig + handler func(*ssh.ServerConn) + + cSigner ssh.Signer + hSigner ssh.Signer +} + +func (s *server) Run(errC chan error) { + for { + conn, err := s.listener.Accept() + if err != nil { + errC <- err + return + } + + go func() { + sconn, _, _, err := ssh.NewServerConn(conn, s.config) + if err != nil { + errC <- err + return + } + s.handler(sconn) + }() + } +} + +func (s *server) Stop() error { + return s.listener.Close() +} + +func generateSigner(t *testing.T) ssh.Signer { + private, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err) + + block := &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(private), + } + + privatePEM := pem.EncodeToMemory(block) + signer, err := ssh.ParsePrivateKey(privatePEM) + require.NoError(t, err) + + return signer +} + +func (s *server) GetClient(t *testing.T) (ssh.Conn, <-chan ssh.NewChannel, <-chan *ssh.Request) { + conn, err := net.Dial("tcp", s.listener.Addr().String()) + require.NoError(t, err) + + sconn, nc, r, err := ssh.NewClientConn(conn, "", &ssh.ClientConfig{ + Auth: []ssh.AuthMethod{ssh.PublicKeys(s.cSigner)}, + HostKeyCallback: ssh.FixedHostKey(s.hSigner.PublicKey()), + }) + require.NoError(t, err) + + return sconn, nc, r +} + +func newServer(t *testing.T, handler func(*ssh.ServerConn)) *server { + listener, err := net.Listen("tcp", "localhost:0") + require.NoError(t, err) + + cSigner := generateSigner(t) + hSigner := generateSigner(t) + + config := &ssh.ServerConfig{ + NoClientAuth: true, + } + config.AddHostKey(hSigner) + + return &server{ + listener: listener, + config: config, + handler: handler, + cSigner: cSigner, + hSigner: hSigner, + } +} + +// 
TestTransportError ensures ConnectProxyTransport does not block forever +// when an error occurs while opening the transport channel. +func TestTransportError(t *testing.T) { + handlerErrC := make(chan error, 1) + serverErrC := make(chan error, 1) + + server := newServer(t, func(sconn *ssh.ServerConn) { + _, _, err := ConnectProxyTransport(sconn, &DialReq{ + Address: "test", ServerID: "test", + }, false) + handlerErrC <- err + }) + + go server.Run(serverErrC) + t.Cleanup(func() { require.NoError(t, server.Stop()) }) + + sconn1, nc, _ := server.GetClient(t) + t.Cleanup(func() { require.Error(t, sconn1.Close()) }) + + channel := <-nc + require.Equal(t, channel.ChannelType(), constants.ChanTransport) + + sconn1.Close() + err := timeoutErrC(t, handlerErrC, time.Second*5) + require.Error(t, err) + + sconn2, nc, _ := server.GetClient(t) + t.Cleanup(func() { require.NoError(t, sconn2.Close()) }) + + channel = <-nc + require.Equal(t, channel.ChannelType(), constants.ChanTransport) + + err = channel.Reject(ssh.ConnectionFailed, "test reject") + require.NoError(t, err) + + err = timeoutErrC(t, handlerErrC, time.Second*5) + require.Error(t, err) + + select { + case err = <-serverErrC: + require.FailNow(t, err.Error()) + default: + } +} + +func timeoutErrC(t *testing.T, errC <-chan error, d time.Duration) error { + timeout := time.NewTimer(d) + select { + case err := <-errC: + return err + case <-timeout.C: + require.FailNow(t, "failed to receive on err channel in time") + } + + return nil +} diff --git a/assets/aws/Makefile b/assets/aws/Makefile index ab99a07b505de..734babbc28076 100644 --- a/assets/aws/Makefile +++ b/assets/aws/Makefile @@ -14,7 +14,7 @@ AWS_REGION ?= us-west-2 # This must be a _released_ version of Teleport, i.e. one which has binaries # available for download on https://gravitational.com/teleport/download # Unreleased versions will fail to build. 
-TELEPORT_VERSION ?= 9.0.4 +TELEPORT_VERSION ?= 9.1.0 # Teleport UID is the UID of a non-privileged 'teleport' user TELEPORT_UID ?= 1007 diff --git a/assets/monitoring/gops.py b/assets/monitoring/gops.py deleted file mode 100644 index e7b0133d73159..0000000000000 --- a/assets/monitoring/gops.py +++ /dev/null @@ -1,99 +0,0 @@ -''' -Copyright 2017 Gravitational, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -''' - -import fileinput -import collections -import sys -import json -import argparse - - -def collect(): - go = {} - prev_stack = "" - for line in sys.stdin: - if "goroutine" in line: - if prev_stack != "": - go[prev_stack] = go.setdefault(prev_stack,0) + 1 - prev_stack = "" - if ".go" in line: - parts = line.split() - prev_stack = parts[0] - print json.dumps(go) - -def diff(a, b): - with open(a) as fa: - ja = json.load(fa) - with open(b) as fb: - jb = json.load(fb) - diff = {} - not_in_a = {} - not_in_b = {} - total_diff = 0 - for key, count in ja.iteritems(): - if key in jb: - diff[key] = jb[key] - count - total_diff += diff[key] - else: - not_in_b[key] = count - total_diff += count - for key, count in jb.iteritems(): - if key not in ja: - not_in_a[key] = count - total_diff += count - - print "change from %s to %s" % (a, b) - for key, count in sorted(diff.iteritems()): - if count != 0: - print key, ": ", count - - print "not in a" - for key, count in sorted(not_in_a.iteritems()): - if count != 0: - print key, ": ", count - - print "not in b" - for key, count in 
sorted(not_in_b.iteritems()): - if count != 0: - print key, ": ", count - print "total diff:", total_diff - -parser = argparse.ArgumentParser( - description="parse and print diffs for go stack traces") - -subparsers = parser.add_subparsers( - title='subcommands', - description='valid subcommands', - help='pick one of the commands, collect or diff') - -ccollect = subparsers.add_parser( - 'collect', - help='collect collects the output from stack trace, e.g. gops stack | python go.py collect > /tmp/a') -ccollect.set_defaults(func='collect') - -cdiff = subparsers.add_parser( - 'diff', - help='diff diffs two collected stats, e.g. python go.py diff /tmp/a /tmp/b') -cdiff.add_argument("first_file") -cdiff.add_argument("second_file") -cdiff.set_defaults(func='diff') - -args = parser.parse_args() - -if args.func == 'collect': - collect() -else: - diff(args.first_file, args.second_file) diff --git a/build.assets/Makefile b/build.assets/Makefile index c1809a22185e5..e953be19cdbe3 100644 --- a/build.assets/Makefile +++ b/build.assets/Makefile @@ -316,6 +316,14 @@ enter: buildbox docker run $(DOCKERFLAGS) -ti $(NOROOT) \ -e HOME=$(SRCDIR)/build.assets -w $(SRCDIR) $(BUILDBOX) /bin/bash +# +# Starts a root shell inside the build container +# +.PHONY:enter-root +enter-root: buildbox + docker run $(DOCKERFLAGS) -ti \ + -e HOME=$(SRCDIR)/build.assets -w $(SRCDIR) $(BUILDBOX) /bin/bash + # # Starts shell inside the centos7 container # @@ -415,7 +423,7 @@ docsbox: .PHONY:test-docs test-docs: docsbox docker run --platform=linux/amd64 -i $(NOROOT) -v $$(pwd)/..:/src/content $(DOCSBOX) \ - /bin/sh -c "yarn markdown-lint" + /bin/sh -c "yarn markdown-lint-external-links" # # Print the Go version used to build Teleport. 
diff --git a/constants.go b/constants.go index 0ab4b78a40ca9..e134e1a6e030d 100644 --- a/constants.go +++ b/constants.go @@ -27,9 +27,6 @@ import ( // WebAPIVersion is a current webapi version const WebAPIVersion = "v1" -// ForeverTTL means that object TTL will not expire unless deleted -const ForeverTTL time.Duration = 0 - const ( // SSHAuthSock is the environment variable pointing to the // Unix socket the SSH agent is running on. @@ -146,9 +143,6 @@ const ( // ComponentBackend is a backend component ComponentBackend = "backend" - // ComponentCachingClient is a caching auth client - ComponentCachingClient = "client:cache" - // ComponentSubsystemProxy is the proxy subsystem. ComponentSubsystemProxy = "subsystem:proxy" @@ -529,6 +523,10 @@ const ( // allowed database users. TraitDBUsers = "db_users" + // TraitAWSRoleARNs is the name of the role variable used to store + // allowed AWS role ARNs. + TraitAWSRoleARNs = "aws_role_arns" + // TraitTeams is the name of the role variable use to store team // membership information TraitTeams = "github_teams" @@ -556,17 +554,15 @@ const ( // TraitInternalDBUsersVariable is the variable used to store allowed // database users for local accounts. TraitInternalDBUsersVariable = "{{internal.db_users}}" + + // TraitInternalAWSRoleARNs is the variable used to store allowed AWS + // role ARNs for local accounts. + TraitInternalAWSRoleARNs = "{{internal.aws_role_arns}}" ) // SCP is Secure Copy. const SCP = "scp" -// Root is *nix system administrator account name. -const Root = "root" - -// Administrator is the Windows system administrator account name. -const Administrator = "Administrator" - // AdminRoleName is the name of the default admin role for all local users if // another role is not explicitly assigned const AdminRoleName = "admin" @@ -657,9 +653,6 @@ const ( // EnvUserProfile is the home directory environment variable on Windows. 
EnvUserProfile = "USERPROFILE" - // KubeCAPath is a hardcode of mounted CA inside every pod of K8s - KubeCAPath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" - // KubeRunTests turns on kubernetes tests KubeRunTests = "TEST_KUBE" @@ -768,6 +761,3 @@ const UserSingleUseCertTTL = time.Minute // StandardHTTPSPort is the default port used for the https URI scheme, // cf. RFC 7230 § 2.7.2. const StandardHTTPSPort = 443 - -// StandardRDPPort is the default port used for RDP. -const StandardRDPPort = 3389 diff --git a/docs/img/database-access/dbeaver-configure-server.png b/docs/img/database-access/dbeaver-configure-server.png index 67729221d6da5..9f5a12a66131b 100644 Binary files a/docs/img/database-access/dbeaver-configure-server.png and b/docs/img/database-access/dbeaver-configure-server.png differ diff --git a/docs/img/database-access/dbeaver-pg-configure-server.png b/docs/img/database-access/dbeaver-pg-configure-server.png new file mode 100644 index 0000000000000..12a597709fc09 Binary files /dev/null and b/docs/img/database-access/dbeaver-pg-configure-server.png differ diff --git a/docs/img/database-access/dbeaver-ssl.png b/docs/img/database-access/dbeaver-ssl.png deleted file mode 100644 index 00ae986a66d4b..0000000000000 Binary files a/docs/img/database-access/dbeaver-ssl.png and /dev/null differ diff --git a/docs/img/database-access/dbeaver-tls-settings.png b/docs/img/database-access/dbeaver-tls-settings.png deleted file mode 100644 index 6dc633ba08180..0000000000000 Binary files a/docs/img/database-access/dbeaver-tls-settings.png and /dev/null differ diff --git a/docs/img/database-access/guides/sqlserver/dbeaver-connection@2x.png b/docs/img/database-access/guides/sqlserver/dbeaver-connection@2x.png index be7e973075c14..9a5bdb7505e43 100644 Binary files a/docs/img/database-access/guides/sqlserver/dbeaver-connection@2x.png and b/docs/img/database-access/guides/sqlserver/dbeaver-connection@2x.png differ diff --git 
a/docs/pages/database-access/guides/gui-clients.mdx b/docs/pages/database-access/guides/gui-clients.mdx index 2d103f5504890..e6ca98ccf4417 100644 --- a/docs/pages/database-access/guides/gui-clients.mdx +++ b/docs/pages/database-access/guides/gui-clients.mdx @@ -79,6 +79,25 @@ Ensure that your environment includes the following: + +Starting the local database proxy with the `--tunnel` flag will create an +authenticated tunnel that you can use to connect to your database instances. +You won't need to configure any credentials when connecting to this tunnel. + +Here is an example on how to start the proxy: + +```bash +# First, login into the database. +$ tsh db login + +# Then, start the local proxy. +$ tsh proxy db --tunnel +Started authenticated tunnel for the database "" in cluster "" on 127.0.0.1:62652. +``` + +You can then connect to the address the proxy command returns, in our example it +is `127.0.0.1:62652`. + If you're using Teleport in [TLS routing](../../setup/operations/tls-routing.mdx) mode where each database protocol is multiplexed on the same web proxy port, use @@ -178,6 +197,20 @@ certificate, key and root certificate from the configuration above: Click "Save", and pgAdmin should immediately connect. If pgAdmin prompts you for password, leave the password field empty and click OK. +## PostgreSQL DBeaver +To connect to your PostgreSQL instance, use the authenticated proxy address. +This is `127.0.0.1:62652` in the example above (see the “Authenticated Proxy” +section on [Get connection information](./gui-clients.mdx#get-connection-information) +for more information). + +Use the "Database native" authentication with an empty password: + +![DBeaver Postgres Configure +Server](../../../img/database-access/dbeaver-pg-configure-server.png) + +Clicking on "Test connection" should return a connection success message. Then, +click on "Finish" to save the configuration. 
+ ## MySQL Workbench [MySQL Workbench](https://www.mysql.com/products/workbench/) is a GUI @@ -219,7 +252,8 @@ In the search bar of the "Connect to a database" window that opens up, type "mys ![DBeaver Select Driver](../../../img/database-access/dbeaver-select-driver.png) -In the newly-opened "Connection Settings" tab, copy the `Server Host` and `Port` from the `tsh db config` output into the DBeaver config fields: +In the newly-opened "Connection Settings" tab, use the Host as `localhost` and +Port as the one returned by the proxy command (`62652` in the example above): ![DBeaver Select Configure Server](../../../img/database-access/dbeaver-configure-server.png) @@ -233,20 +267,8 @@ Authentication" box, and click "Ok" to save: ![DBeaver Driver Settings](../../../img/database-access/dbeaver-driver-settings.png) -Once you are back in the "Connection Settings" window, navigate to the "Driver -Properties" tab, scroll down to find the `enabledTLSProtocols` field and enter -"TLSv1.2" into the `Value` field: - -![DBeaver TLS Settings](../../../img/database-access/dbeaver-tls-settings.png) - -Navigate to the "SSL" tab, check the "Use SSL" box, uncheck the "Verify Server -Certificates" box, and copy the `CA Certificate`, `Client Certificate`, and -`Client Private Key` paths from the output of the `tsh` command you ran at the -beginning of this guide: - -![DBeaver SSL](../../../img/database-access/dbeaver-ssl.png) - -Click "Ok" to finish and DBeaver should connect to the remote MySQL server automatically. +Once you are back in the "Connection Settings" window, click "Ok" to finish and +DBeaver should connect to the remote MySQL server automatically. ## MongoDB Compass @@ -276,7 +298,7 @@ Click on the "Connect" button. ## SQL Server DBeaver In the DBeaver connection configuration menu, use your proxy's endpoint. This is -`localhost:4242` in the example above. (See +`localhost:62652` in the example above. 
(See [Get connection information](./gui-clients.mdx#get-connection-information) for more information.) diff --git a/docs/pages/machine-id/guides/ansible.mdx b/docs/pages/machine-id/guides/ansible.mdx index 2c43196ff4174..1ea06b8f4ff95 100644 --- a/docs/pages/machine-id/guides/ansible.mdx +++ b/docs/pages/machine-id/guides/ansible.mdx @@ -10,7 +10,8 @@ with a configuration file that is automatically managed by Machine ID. You will need the following tools to use Teleport with Ansible. -- [Teleport Enterprise](../../enterprise/introduction.mdx) or [Teleport Cloud](../../cloud/introduction.mdx) >= 9.0.0 +- The Teleport Auth Service and Proxy Service version >= 9.0.0, deployed on your own infrastructure or managed via Teleport Cloud. +- The `tsh` client tool version >= (=teleport.version=). - `ssh` OpenSSH tool - `ansible` >= (=ansible.min_version=) - Optional tool `jq` to process `JSON` output diff --git a/docs/pages/machine-id/introduction.mdx b/docs/pages/machine-id/introduction.mdx index 5b4c88136c77b..6b8c302c5c86f 100644 --- a/docs/pages/machine-id/introduction.mdx +++ b/docs/pages/machine-id/introduction.mdx @@ -13,9 +13,8 @@ role-based access controls and audit. Some of the things you can do with Machine ID: - Machines can retrieve short-lived SSH certificates for CI/CD pipelines. -- Machines can retrieve short-lived X.509 certificates for use with databases - or applications. [Coming in Teleport -9.1](https://goteleport.com/docs/preview/upcoming-releases/#teleport-91). +- Machines can retrieve short-lived X.509 certificates for use with databases. + [Coming in Teleport 9.2](https://goteleport.com/docs/preview/upcoming-releases/#teleport-92). - Configure role-based access controls and locking for machines. - Capture access events in the audit log. 
diff --git a/docs/pages/machine-id/reference/cli.mdx b/docs/pages/machine-id/reference/cli.mdx index bc5276be2a147..533b01a0b2cc0 100644 --- a/docs/pages/machine-id/reference/cli.mdx +++ b/docs/pages/machine-id/reference/cli.mdx @@ -38,12 +38,12 @@ $ tbot start \ | `-a/--auth-server` | Address of the Teleport Auth Server (On-Prem installs) or Teleport Cloud tenant. | | `--token` | A bot join token, if attempting to onboard a new bot; used on first connect. | | `--ca-pin` | CA pin to validate the Teleport Auth Server; used on first connect. | -| `--data-dir` | Directory to store internal bot data. Access to this directory should be limited. | +| `--data-dir` | Directory to store internal bot data. Access to this directory should be limited. | | `--destination-dir` | Directory to write short-lived machine certificates. | | `--certificate-ttl` | TTL of short-lived machine certificates. | | `--renewal-interval` | Interval at which short-lived certificates are renewed; must be less than the certificate TTL. | | `--join-method` | Method to use to join the cluster. Can be `token` or `iam`. | -| `--one-shot` | If set, quit after the first renewal. | +| `--oneshot` | If set, quit after the first renewal. | ## `tbot init` diff --git a/docs/pages/setup/guides/docker.mdx b/docs/pages/setup/guides/docker.mdx index e6b8b656ebfcf..e5e73a792d341 100644 --- a/docs/pages/setup/guides/docker.mdx +++ b/docs/pages/setup/guides/docker.mdx @@ -29,7 +29,7 @@ $ docker version # Version: (=docker.version=) ``` -- The `tsh` client tool, which ships with the `teleport` binary. Visit [Download Teleport](/teleport/download/) to download `tsh`. +- The `tsh` client tool, which ships with the `teleport` binary. Visit [Download Teleport](https://goteleport.com/download/) to download `tsh`. 
diff --git a/docs/pages/setup/reference/cli.mdx b/docs/pages/setup/reference/cli.mdx index e3be084d80af4..d147950796783 100644 --- a/docs/pages/setup/reference/cli.mdx +++ b/docs/pages/setup/reference/cli.mdx @@ -187,7 +187,7 @@ $ tsh ssh [] <[user@]host> [...] `<[user@]host> [...]` - `user` The login identity to use on the remote host. If `[user]` is not specified the user defaults to `$USER` or can be set with `--user`. If the flag `--user` and positional argument `[user]` are specified the arg `[user]` takes precedence. -- `host` A `nodename` of a cluster node or a +- `host` The `nodename` of a cluster Node or a label specification like `env=aws` to run on all matching hosts. - `command` The command to execute on a remote host. #### Flags @@ -219,6 +219,8 @@ $ tsh ssh --proxy proxy.example.com --user teleport -d root@grav-00 # `tsh ssh` takes the same arguments as OpenSSH client: $ tsh ssh -o ForwardAgent=yes root@grav-00 $ tsh ssh -o AddKeysToAgent=yes root@grav-00 +# Run `hostname` on all nodes with the `env: aws` label +$ tsh ssh root@env=aws hostname ``` ### tsh config @@ -744,7 +746,7 @@ can be exported with `tctl auth sign` or `tsh login --out=`. Note that when a `tctl` command is run locally on an Auth Service, the audit logs will show that it was performed by the Auth Service itself. 
- + To properly audit admin actions at scale, it is important to limit direct SSH access to the Auth Service with [Access Controls](../../access-controls/introduction.mdx) and ensure that diff --git a/examples/aws/cloudformation/ent.yaml b/examples/aws/cloudformation/ent.yaml index 1e1271fe6914a..5bb6c804cbac7 100644 --- a/examples/aws/cloudformation/ent.yaml +++ b/examples/aws/cloudformation/ent.yaml @@ -97,24 +97,24 @@ Mappings: t2.xlarge: {Arch: HVM64} AWSRegionArch2AMI: - # All AMIs from AWS - gravitational-teleport-ami-ent-9.0.4 + # All AMIs from AWS - gravitational-teleport-ami-ent-9.1.0 eu-north-1: {HVM64: ami-05ff5c0be3d4b8da4} - ap-south-1: {HVM64 : ami-0234c3fbf9683ac11} - eu-west-1: {HVM64 : ami-0eb161a9bbd45a688} - eu-west-2: {HVM64 : ami-079c2d1cc2ee457a6} + ap-south-1: {HVM64 : ami-04d9edbed2548bed2} + eu-west-1: {HVM64 : ami-0388189ff1e995b50} + eu-west-2: {HVM64 : ami-0eaada8773dcd04b3} eu-west-3: {HVM64: ami-087bdce4ab6a2964d} - ap-northeast-1: {HVM64 : ami-0cc7d8d2a5613cfcc} - ap-northeast-2: {HVM64 : ami-053273f4d86c39cb9} + ap-northeast-1: {HVM64 : ami-037db88be86d049eb} + ap-northeast-2: {HVM64 : ami-01b1ddba534df5b19} ap-northeast-3: {HVM64: ami-0a36f2dfdca83ea7d} - sa-east-1: {HVM64 : ami-03cfc15de5a76e237} - ca-central-1: {HVM64 : ami-0abcad8a75edbfb1b} - ap-southeast-1: {HVM64 : ami-01ce7db1eb73c2730} - ap-southeast-2: {HVM64 : ami-023b05e0c6db08968} - eu-central-1: {HVM64 : ami-0f60ce8a6528531ae} - us-east-1: {HVM64 : ami-0ceed74e000a11c0c} - us-east-2: {HVM64 : ami-00a256d5049be30f0} - us-west-1: {HVM64 : ami-0f5e1de6464ee5445} - us-west-2: {HVM64 : ami-0436f4db1d05efa65} + sa-east-1: {HVM64 : ami-070d271e52601be54} + ca-central-1: {HVM64 : ami-0dba8c8ae8b0c2f78} + ap-southeast-1: {HVM64 : ami-0491bf03f96de2c02} + ap-southeast-2: {HVM64 : ami-0261efc4b0f34d124} + eu-central-1: {HVM64 : ami-0422e573efe4516ac} + us-east-1: {HVM64 : ami-0126041ad6b709c64} + us-east-2: {HVM64 : ami-031b2d8f0377632aa} + us-west-1: {HVM64 : 
ami-0375edf3fb7978399} + us-west-2: {HVM64 : ami-0992d786aa06095b4} Resources: # Auth server setup diff --git a/examples/aws/cloudformation/oss.yaml b/examples/aws/cloudformation/oss.yaml index 646a1a45dddf5..dcc32220cdc87 100644 --- a/examples/aws/cloudformation/oss.yaml +++ b/examples/aws/cloudformation/oss.yaml @@ -97,24 +97,24 @@ Mappings: t2.xlarge: {Arch: HVM64} AWSRegionArch2AMI: - # All AMIs from AWS - gravitational-teleport-ami-oss-9.0.4 + # All AMIs from AWS - gravitational-teleport-ami-oss-9.1.0 eu-north-1: {HVM64: ami-0eef7480d85b07d78} - ap-south-1: {HVM64 : ami-006cd65545363063f} - eu-west-1: {HVM64 : ami-054434b6f18e722d3} - eu-west-2: {HVM64 : ami-0d41ee6c81b6c17ac} + ap-south-1: {HVM64 : ami-00f47213fe1122cc1} + eu-west-1: {HVM64 : ami-0a78a7678d1ad8856} + eu-west-2: {HVM64 : ami-0f84f1471d67da992} eu-west-3: {HVM64: ami-0211c6e2e821dd249} - ap-northeast-1: {HVM64 : ami-00b49f7b1ae5dd039} - ap-northeast-2: {HVM64 : ami-077a53e7d4a39516b} + ap-northeast-1: {HVM64 : ami-0520bf8513b9bd802} + ap-northeast-2: {HVM64 : ami-09bc480030969748a} ap-northeast-3: {HVM64: ami-02bb8618b75d025aa} - sa-east-1: {HVM64 : ami-02da17caa83529eed} - ca-central-1: {HVM64 : ami-02bac82ada2e1ff46} - ap-southeast-1: {HVM64 : ami-0d6c6914622de9a5e} - ap-southeast-2: {HVM64 : ami-0a087a88e71c30d8e} - eu-central-1: {HVM64 : ami-0a3761a4ee7c16855} - us-east-1: {HVM64 : ami-04c33916ecd10e452} - us-east-2: {HVM64 : ami-048f6ae4184bee715} - us-west-1: {HVM64 : ami-0af6920796cd9a89a} - us-west-2: {HVM64 : ami-062f37e6fa6f93b45} + sa-east-1: {HVM64 : ami-0031ed28b572dce7c} + ca-central-1: {HVM64 : ami-04ddcc3f72edd9a6f} + ap-southeast-1: {HVM64 : ami-0853655b8ea488474} + ap-southeast-2: {HVM64 : ami-0dc4f631abc049ebc} + eu-central-1: {HVM64 : ami-0aab16a13e939bf67} + us-east-1: {HVM64 : ami-08bafa3f9cf60f2d1} + us-east-2: {HVM64 : ami-05f55fb3fa2610ac8} + us-west-1: {HVM64 : ami-0f3ab12161eabb666} + us-west-2: {HVM64 : ami-0c57a600177e567f2} Resources: # Auth server setup diff --git 
a/examples/aws/terraform/AMIS.md b/examples/aws/terraform/AMIS.md index 455feb455609c..66a3a4740f362 100644 --- a/examples/aws/terraform/AMIS.md +++ b/examples/aws/terraform/AMIS.md @@ -6,65 +6,65 @@ is updated when new AMI versions are released. ### OSS ``` -# eu-north-1 v9.0.4 OSS: ami-03c13104b5dcfd1da -# ap-south-1 v9.0.4 OSS: ami-006cd65545363063f -# eu-west-1 v9.0.4 OSS: ami-054434b6f18e722d3 -# eu-west-2 v9.0.4 OSS: ami-0d41ee6c81b6c17ac -# eu-west-3 v9.0.4 OSS: ami-00c71fb34ef7bad43 -# ap-northeast-1 v9.0.4 OSS: ami-00b49f7b1ae5dd039 -# ap-northeast-2 v9.0.4 OSS: ami-077a53e7d4a39516b -# ap-northeast-3 v9.0.4 OSS: ami-0ee60041c55adbdba -# sa-east-1 v9.0.4 OSS: ami-02da17caa83529eed -# ca-central-1 v9.0.4 OSS: ami-02bac82ada2e1ff46 -# ap-southeast-1 v9.0.4 OSS: ami-0d6c6914622de9a5e -# ap-southeast-2 v9.0.4 OSS: ami-0a087a88e71c30d8e -# eu-central-1 v9.0.4 OSS: ami-0a3761a4ee7c16855 -# us-east-1 v9.0.4 OSS: ami-04c33916ecd10e452 -# us-east-2 v9.0.4 OSS: ami-048f6ae4184bee715 -# us-west-1 v9.0.4 OSS: ami-0af6920796cd9a89a -# us-west-2 v9.0.4 OSS: ami-062f37e6fa6f93b45 +# eu-north-1 v9.1.0 OSS: ami-001de0c0d31ac876b +# ap-south-1 v9.1.0 OSS: ami-00f47213fe1122cc1 +# eu-west-1 v9.1.0 OSS: ami-0a78a7678d1ad8856 +# eu-west-2 v9.1.0 OSS: ami-0f84f1471d67da992 +# eu-west-3 v9.1.0 OSS: ami-0e2514dc97b670278 +# ap-northeast-1 v9.1.0 OSS: ami-0520bf8513b9bd802 +# ap-northeast-2 v9.1.0 OSS: ami-09bc480030969748a +# ap-northeast-3 v9.1.0 OSS: ami-03acdebdaacc323b5 +# sa-east-1 v9.1.0 OSS: ami-0031ed28b572dce7c +# ca-central-1 v9.1.0 OSS: ami-04ddcc3f72edd9a6f +# ap-southeast-1 v9.1.0 OSS: ami-0853655b8ea488474 +# ap-southeast-2 v9.1.0 OSS: ami-0dc4f631abc049ebc +# eu-central-1 v9.1.0 OSS: ami-0aab16a13e939bf67 +# us-east-1 v9.1.0 OSS: ami-08bafa3f9cf60f2d1 +# us-east-2 v9.1.0 OSS: ami-05f55fb3fa2610ac8 +# us-west-1 v9.1.0 OSS: ami-0f3ab12161eabb666 +# us-west-2 v9.1.0 OSS: ami-0c57a600177e567f2 ``` ### Enterprise ``` -# eu-north-1 v9.0.4 Enterprise: 
ami-0616d808966acecd3 -# ap-south-1 v9.0.4 Enterprise: ami-0234c3fbf9683ac11 -# eu-west-1 v9.0.4 Enterprise: ami-0eb161a9bbd45a688 -# eu-west-2 v9.0.4 Enterprise: ami-079c2d1cc2ee457a6 -# eu-west-3 v9.0.4 Enterprise: ami-075cc90b383123fd6 -# ap-northeast-1 v9.0.4 Enterprise: ami-0cc7d8d2a5613cfcc -# ap-northeast-2 v9.0.4 Enterprise: ami-053273f4d86c39cb9 -# ap-northeast-3 v9.0.4 Enterprise: ami-073c4a149eec147fb -# sa-east-1 v9.0.4 Enterprise: ami-03cfc15de5a76e237 -# ca-central-1 v9.0.4 Enterprise: ami-0abcad8a75edbfb1b -# ap-southeast-1 v9.0.4 Enterprise: ami-01ce7db1eb73c2730 -# ap-southeast-2 v9.0.4 Enterprise: ami-023b05e0c6db08968 -# eu-central-1 v9.0.4 Enterprise: ami-0f60ce8a6528531ae -# us-east-1 v9.0.4 Enterprise: ami-0ceed74e000a11c0c -# us-east-2 v9.0.4 Enterprise: ami-00a256d5049be30f0 -# us-west-1 v9.0.4 Enterprise: ami-0f5e1de6464ee5445 -# us-west-2 v9.0.4 Enterprise: ami-0436f4db1d05efa65 +# eu-north-1 v9.1.0 Enterprise: ami-07657f12581b48cb1 +# ap-south-1 v9.1.0 Enterprise: ami-04d9edbed2548bed2 +# eu-west-1 v9.1.0 Enterprise: ami-0388189ff1e995b50 +# eu-west-2 v9.1.0 Enterprise: ami-0eaada8773dcd04b3 +# eu-west-3 v9.1.0 Enterprise: ami-05ae2b5494bae00c8 +# ap-northeast-1 v9.1.0 Enterprise: ami-037db88be86d049eb +# ap-northeast-2 v9.1.0 Enterprise: ami-01b1ddba534df5b19 +# ap-northeast-3 v9.1.0 Enterprise: ami-00fadd3d5993c8dc9 +# sa-east-1 v9.1.0 Enterprise: ami-070d271e52601be54 +# ca-central-1 v9.1.0 Enterprise: ami-0dba8c8ae8b0c2f78 +# ap-southeast-1 v9.1.0 Enterprise: ami-0491bf03f96de2c02 +# ap-southeast-2 v9.1.0 Enterprise: ami-0261efc4b0f34d124 +# eu-central-1 v9.1.0 Enterprise: ami-0422e573efe4516ac +# us-east-1 v9.1.0 Enterprise: ami-0126041ad6b709c64 +# us-east-2 v9.1.0 Enterprise: ami-031b2d8f0377632aa +# us-west-1 v9.1.0 Enterprise: ami-0375edf3fb7978399 +# us-west-2 v9.1.0 Enterprise: ami-0992d786aa06095b4 ``` ### Enterprise FIPS ``` -# eu-north-1 v9.0.4 Enterprise FIPS: ami-0a714b181c052e8cc -# ap-south-1 v9.0.4 Enterprise FIPS: 
ami-08b892eec04bbd13d -# eu-west-1 v9.0.4 Enterprise FIPS: ami-03199de32fb0ac6d2 -# eu-west-2 v9.0.4 Enterprise FIPS: ami-0b9cdd3d5e0e13d3b -# eu-west-3 v9.0.4 Enterprise FIPS: ami-0fc3bcd85e18d16c3 -# ap-northeast-1 v9.0.4 Enterprise FIPS: ami-0dceee04d304a225f -# ap-northeast-2 v9.0.4 Enterprise FIPS: ami-0d43ea43dc27c3cf8 -# ap-northeast-3 v9.0.4 Enterprise FIPS: ami-00a2bdbfef701f14e -# sa-east-1 v9.0.4 Enterprise FIPS: ami-0003620a5703732b4 -# ca-central-1 v9.0.4 Enterprise FIPS: ami-0f1aff4ebecff4dff -# ap-southeast-1 v9.0.4 Enterprise FIPS: ami-08fc5ba3c946f1e71 -# ap-southeast-2 v9.0.4 Enterprise FIPS: ami-066a0841557114e9b -# eu-central-1 v9.0.4 Enterprise FIPS: ami-0d0f8f005534a2216 -# us-east-1 v9.0.4 Enterprise FIPS: ami-0c094571289de65b3 -# us-east-2 v9.0.4 Enterprise FIPS: ami-0213058cb9c251abd -# us-west-1 v9.0.4 Enterprise FIPS: ami-04c373b56dcaebe05 -# us-west-2 v9.0.4 Enterprise FIPS: ami-0b82a06b815c959e2 +# eu-north-1 v9.1.0 Enterprise FIPS: ami-0de6ccf671fe466f5 +# ap-south-1 v9.1.0 Enterprise FIPS: ami-03b514875e071d633 +# eu-west-1 v9.1.0 Enterprise FIPS: ami-0ee1a3c07e49fbb17 +# eu-west-2 v9.1.0 Enterprise FIPS: ami-02ed1ff2f415e9f55 +# eu-west-3 v9.1.0 Enterprise FIPS: ami-0766750087512a72c +# ap-northeast-1 v9.1.0 Enterprise FIPS: ami-0960de91f6155ca83 +# ap-northeast-2 v9.1.0 Enterprise FIPS: ami-0e885f12fd6354845 +# ap-northeast-3 v9.1.0 Enterprise FIPS: ami-02accaca2f91655ea +# sa-east-1 v9.1.0 Enterprise FIPS: ami-03ec39bea4da3a74e +# ca-central-1 v9.1.0 Enterprise FIPS: ami-0dfee22e417f5f31a +# ap-southeast-1 v9.1.0 Enterprise FIPS: ami-0ea0cb82a382e4378 +# ap-southeast-2 v9.1.0 Enterprise FIPS: ami-0ceb518f771825e54 +# eu-central-1 v9.1.0 Enterprise FIPS: ami-00ac040f1210a81ac +# us-east-1 v9.1.0 Enterprise FIPS: ami-0f58696c730f4b783 +# us-east-2 v9.1.0 Enterprise FIPS: ami-00726f85bea1cc579 +# us-west-1 v9.1.0 Enterprise FIPS: ami-07126cc4f51b5c9c6 +# us-west-2 v9.1.0 Enterprise FIPS: ami-046d9890d5982db2a ``` diff --git 
a/examples/aws/terraform/ha-autoscale-cluster/README.md b/examples/aws/terraform/ha-autoscale-cluster/README.md index 0635cd2680cb2..b097ad07d05b7 100644 --- a/examples/aws/terraform/ha-autoscale-cluster/README.md +++ b/examples/aws/terraform/ha-autoscale-cluster/README.md @@ -45,7 +45,7 @@ export TF_VAR_cluster_name="teleport.example.com" # OSS: aws ec2 describe-images --owners 126027368216 --filters 'Name=name,Values=gravitational-teleport-ami-oss*' # Enterprise: aws ec2 describe-images --owners 126027368216 --filters 'Name=name,Values=gravitational-teleport-ami-ent*' # FIPS 140-2 images are also available for Enterprise customers, look for '-fips' on the end of the AMI's name -export TF_VAR_ami_name="gravitational-teleport-ami-ent-9.0.4" +export TF_VAR_ami_name="gravitational-teleport-ami-ent-9.1.0" # AWS SSH key name to provision in installed instances, should be available in the region export TF_VAR_key_name="example" diff --git a/examples/aws/terraform/ha-autoscale-cluster/ansible/upgrade.yaml b/examples/aws/terraform/ha-autoscale-cluster/ansible/upgrade.yaml index c01844e80fee9..7237acbab4d41 100644 --- a/examples/aws/terraform/ha-autoscale-cluster/ansible/upgrade.yaml +++ b/examples/aws/terraform/ha-autoscale-cluster/ansible/upgrade.yaml @@ -20,14 +20,6 @@ - include: vars.yaml - - name: Copy gops to target box (for debugging) - when: gops_path is defined - block: - - copy: - src: "{{gops_path}}" - dest: /usr/local/bin - mode: 0755 - - name: Copy binaries from local directory when: teleport_path is defined block: diff --git a/examples/aws/terraform/ha-autoscale-cluster/bastion.tf b/examples/aws/terraform/ha-autoscale-cluster/bastion.tf index 5ccb425ac948f..8b9564f2007fd 100644 --- a/examples/aws/terraform/ha-autoscale-cluster/bastion.tf +++ b/examples/aws/terraform/ha-autoscale-cluster/bastion.tf @@ -1,6 +1,6 @@ // Bastion is an emergency access bastion -// that could be spinned up on demand in case if -// of need to have emrergency administrative access +// 
that could be spun up on demand in case +// of the need to have emergency administrative access resource "aws_instance" "bastion" { count = "1" ami = data.aws_ami.base.id diff --git a/examples/aws/terraform/starter-cluster/README.md b/examples/aws/terraform/starter-cluster/README.md index 10ce597dc6edf..0d5f0be3df59e 100644 --- a/examples/aws/terraform/starter-cluster/README.md +++ b/examples/aws/terraform/starter-cluster/README.md @@ -86,7 +86,7 @@ TF_VAR_license_path ?="/path/to/license" # OSS: aws ec2 describe-images --owners 126027368216 --filters 'Name=name,Values=gravitational-teleport-ami-oss*' # Enterprise: aws ec2 describe-images --owners 126027368216 --filters 'Name=name,Values=gravitational-teleport-ami-ent*' # FIPS 140-2 images are also available for Enterprise customers, look for '-fips' on the end of the AMI's name -TF_VAR_ami_name ?="gravitational-teleport-ami-ent-9.0.4" +TF_VAR_ami_name ?="gravitational-teleport-ami-ent-9.1.0" # Route 53 hosted zone to use, must be a root zone registered in AWS, e.g. 
example.com TF_VAR_route53_zone ?="example.com" diff --git a/go.mod b/go.mod index b97146025facb..dafbb797b5fd1 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,6 @@ require ( github.com/beevik/etree v1.1.0 github.com/coreos/go-oidc v0.0.0 github.com/coreos/go-semver v0.3.0 - github.com/davecgh/go-spew v1.1.1 github.com/denisenkom/go-mssqldb v0.11.0 github.com/duo-labs/webauthn v0.0.0-20210727191636-9f1b88ef44cc github.com/dustin/go-humanize v1.0.0 @@ -42,7 +41,6 @@ require ( github.com/golang/protobuf v1.5.2 github.com/google/btree v1.0.1 github.com/google/go-cmp v0.5.6 - github.com/google/gops v0.3.14 github.com/google/uuid v1.2.0 github.com/gorilla/websocket v1.4.2 github.com/gravitational/configure v0.0.0-20180808141939-c3428bd84c23 @@ -67,11 +65,9 @@ require ( github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531 github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 - github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 github.com/keys-pub/go-libfido2 v1.5.3-0.20220306005615-8ab03fb1ec27 github.com/kr/pretty v0.3.0 github.com/kr/pty v1.1.8 - github.com/kylelemons/godebug v1.1.0 github.com/mailgun/lemma v0.0.0-20170619173223-4214099fb348 github.com/mailgun/timetools v0.0.0-20170619190023-f3a7b8ffff47 github.com/mailgun/ttlmap v0.0.0-20170619185759-c1c17f74874f @@ -152,6 +148,7 @@ require ( github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect github.com/creack/pty v1.1.11 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect @@ -199,6 +196,7 @@ require ( github.com/joshlf/testutil v0.0.0-20170608050642-b5d8aa79d93d // indirect github.com/klauspost/compress v1.9.5 // indirect github.com/kr/text v0.2.0 // indirect + github.com/kylelemons/godebug v1.1.0 // 
indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailgun/metrics v0.0.0-20150124003306-2b3c4565aafd // indirect github.com/mailgun/minheap v0.0.0-20170619185613-3dbe6c6bf55f // indirect diff --git a/go.sum b/go.sum index 1c4ccf49dddb9..3aa49e12830f0 100644 --- a/go.sum +++ b/go.sum @@ -100,7 +100,6 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/ThalesIgnite/crypto11 v1.2.4 h1:3MebRK/U0mA2SmSthXAIZAdUA9w8+ZuKem2O6HuR1f8= github.com/ThalesIgnite/crypto11 v1.2.4/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= @@ -315,7 +314,6 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -432,8 +430,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/gofuzz 
v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gops v0.3.14 h1:4Gpv4sABlEsVqrtKxiSynzD0//kzjTIUwUm5UgkGILI= -github.com/google/gops v0.3.14/go.mod h1:zjT9F4XsKzazOvdVad3+Zwga79UHKziX3r9TN05rVN8= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -671,11 +667,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/keybase/go-ps v0.0.0-20190827175125-91aafc93ba19/go.mod h1:hY+WOq6m2FpbvyrI93sMaypsttvaIL5nhVR92dTMUcQ= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.9.5 h1:U+CaK85mrNNb4k8BNOfgJtJ/gr6kswUCFj6miSzVC6M= @@ -797,7 +790,6 @@ github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE github.com/nsf/termbox-go 
v0.0.0-20210114135735-d04385b850e8 h1:3vzIuru1svOK2sXlg4XcrO3KkGRneIejmfQfR+ptSW8= github.com/nsf/termbox-go v0.0.0-20210114135735-d04385b850e8/go.mod h1:T0cTdVuOwf7pHQNtfhnEbzHbcNyCEcVU4YPpouCbVxo= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= @@ -809,7 +801,6 @@ github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= @@ -817,8 +808,6 @@ github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0 
h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= @@ -933,7 +922,6 @@ github.com/sethvargo/go-diceware v0.2.1/go.mod h1:lH5Q/oSPMivseNdhMERAC7Ti5oOPqs github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63/go.mod h1:n+VKSARF5y/tS9XFSP7vWDfS+GUC5vs/YT7M5XDTUEM= github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 h1:WnNuhiq+FOY3jNj6JXFT+eLN3CQ/oPIsDPRanvwsmbI= github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500/go.mod h1:+njLrG5wSeoG4Ds61rFgEzKvenR2UHbjMoDHsczxly0= -github.com/shirou/gopsutil v2.20.4+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -1292,7 +1280,6 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201207223542-d4d67f95c62d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1672,7 +1659,6 @@ k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ launchpad.net/gocheck 
v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54= launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/goversion v1.2.0/go.mod h1:Eih9y/uIBS3ulggl7KNJ09xGSLcuNaLgmvvqa07sgfo= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= diff --git a/integration/integration_test.go b/integration/integration_test.go index c3804a72458a6..79f9a127415e3 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -189,6 +189,7 @@ func TestIntegrations(t *testing.T) { t.Run("PortForwarding", suite.bind(testPortForwarding)) t.Run("ProxyHostKeyCheck", suite.bind(testProxyHostKeyCheck)) t.Run("ReverseTunnelCollapse", suite.bind(testReverseTunnelCollapse)) + t.Run("Readyz", suite.bind(testReadyz)) t.Run("RotateChangeSigningAlg", suite.bind(testRotateChangeSigningAlg)) t.Run("RotateRollback", suite.bind(testRotateRollback)) t.Run("RotateSuccess", suite.bind(testRotateSuccess)) @@ -236,6 +237,7 @@ func testAuditOn(t *testing.T, suite *integrationTestSuite) { inForwardAgent: false, auditSessionsURI: t.TempDir(), }, { + comment: "recording proxy with upload to file server", inRecordLocation: types.RecordAtProxy, inForwardAgent: false, auditSessionsURI: t.TempDir(), @@ -3662,6 +3664,45 @@ func testPAM(t *testing.T, suite *integrationTestSuite) { } } +func testReadyz(t *testing.T, suite *integrationTestSuite) { + // TODO: test more service combinations + + recConfig, err := types.NewSessionRecordingConfigFromConfigFile(types.SessionRecordingConfigSpecV2{ + Mode: types.RecordOff, + }) + require.NoError(t, err) + + tconf := suite.defaultServiceConfig() + tconf.Auth.Enabled = true + 
tconf.Auth.SessionRecordingConfig = recConfig + tconf.Proxy.Enabled = true + tconf.Proxy.DisableWebInterface = true + tconf.Proxy.Kube.Enabled = true + // fire up the proxy kube service + tconf.Proxy.Kube.ListenAddr = utils.NetAddr{ + AddrNetwork: "tcp", + Addr: "127.0.0.1:0", + } + tconf.SSH.Enabled = false + tconf.DiagnosticAddr = utils.NetAddr{ + AddrNetwork: "tcp", + Addr: "127.0.0.1:0", + } + + teleport := suite.newTeleportWithConfig(t, nil, nil, tconf) + t.Cleanup(func() { require.NoError(t, teleport.StopAll()) }) + + diagAddr, err := teleport.Process.DiagnosticAddr() + require.NoError(t, err) + + require.Eventually(t, func() bool { + resp, err := http.Get(fmt.Sprintf("http://%s/readyz", diagAddr)) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + return resp.StatusCode == http.StatusOK + }, 5*time.Second, 500*time.Millisecond) +} + // testRotateSuccess tests full cycle cert authority rotation func testRotateSuccess(t *testing.T, suite *integrationTestSuite) { tr := utils.NewTracer(utils.ThisFunction()).Start() diff --git a/integration/utmp_integration_test.go b/integration/utmp_integration_test.go index 1b29e4fa8e644..33c123e190adb 100644 --- a/integration/utmp_integration_test.go +++ b/integration/utmp_integration_test.go @@ -262,7 +262,7 @@ func newSrvCtx(ctx context.Context, t *testing.T) *SrvCtx { nodeDir, "", utils.NetAddr{}, - nil, + s.nodeClient, regular.SetUUID(s.nodeID), regular.SetNamespace(apidefaults.Namespace), regular.SetEmitter(s.nodeClient), diff --git a/lib/auth/auth.go b/lib/auth/auth.go index be9a7b8de7f60..d1204ceac421f 100644 --- a/lib/auth/auth.go +++ b/lib/auth/auth.go @@ -3664,6 +3664,7 @@ const ( type oidcClient struct { client *oidc.Client config oidc.ClientConfig + cancel context.CancelFunc } // samlProvider is internal structure that stores SAML client and its config diff --git a/lib/auth/oidc.go b/lib/auth/oidc.go index dfede51a7a00c..1d8a21dec95c9 100644 --- a/lib/auth/oidc.go +++ b/lib/auth/oidc.go @@ -67,6 
+67,7 @@ func (a *Server) getOIDCClient(conn types.OIDCConnector) (*oidc.Client, error) { return clientPack.client, nil } + clientPack.cancel() delete(a.oidcClients, conn.GetName()) return nil, trace.NotFound("connector %v has updated the configuration and is invalidated", conn.GetName()) @@ -79,26 +80,36 @@ func (a *Server) createOIDCClient(conn types.OIDCConnector) (*oidc.Client, error return nil, trace.Wrap(err) } - doneSyncing := make(chan struct{}) + // SyncProviderConfig doesn't take a context for cancellation, instead it + // returns a channel that has to be closed to stop the sync. To ensure that + // the sync is eventually stopped we create a child context of the server context, which + // is cancelled either on deletion of the connector or shutdown of the server. + // This will cause syncCtx.Done() to unblock, at which point we can close the stop channel. + firstSync := make(chan struct{}) + syncCtx, syncCancel := context.WithCancel(a.closeCtx) go func() { - defer close(doneSyncing) - client.SyncProviderConfig(conn.GetIssuerURL()) + stop := client.SyncProviderConfig(conn.GetIssuerURL()) + close(firstSync) + <-syncCtx.Done() + close(stop) }() select { - case <-doneSyncing: + case <-firstSync: case <-time.After(defaults.WebHeadersTimeout): + syncCancel() return nil, trace.ConnectionProblem(nil, "timed out syncing oidc connector %v, ensure URL %q is valid and accessible and check configuration", conn.GetName(), conn.GetIssuerURL()) case <-a.closeCtx.Done(): + syncCancel() return nil, trace.ConnectionProblem(nil, "auth server is shutting down") } a.lock.Lock() defer a.lock.Unlock() - a.oidcClients[conn.GetName()] = &oidcClient{client: client, config: config} + a.oidcClients[conn.GetName()] = &oidcClient{client: client, config: config, cancel: syncCancel} return client, nil } diff --git a/lib/auth/session_access.go b/lib/auth/session_access.go index 7e854e57680df..a271b7c9cb1b3 100644 --- a/lib/auth/session_access.go +++ b/lib/auth/session_access.go @@ -149,13 
+149,12 @@ func (e *SessionAccessEvaluator) matchesJoin(allow *types.SessionJoinPolicy) boo return false } - for _, policySet := range e.policySets { - for _, allowRole := range allow.Roles { - expr := utils.GlobToRegexp(policySet.Name) - // GlobToRegexp makes sure this is always a valid regexp. - matched, _ := regexp.MatchString(expr, allowRole) + for _, allowRole := range allow.Roles { + // GlobToRegexp makes sure this is always a valid regexp. + expr := regexp.MustCompile(utils.GlobToRegexp(allowRole)) - if matched { + for _, policySet := range e.policySets { + if expr.MatchString(policySet.Name) { return true } } diff --git a/lib/auth/session_access_test.go b/lib/auth/session_access_test.go index 50faddc0b9618..bf48710454ad2 100644 --- a/lib/auth/session_access_test.go +++ b/lib/auth/session_access_test.go @@ -219,6 +219,30 @@ func successJoinTestCase(t *testing.T) joinTestCase { } } +func successGlobJoinTestCase(t *testing.T) joinTestCase { + hostRole, err := types.NewRole("host", types.RoleSpecV5{}) + require.NoError(t, err) + participantRole, err := types.NewRole("participant", types.RoleSpecV5{}) + require.NoError(t, err) + + participantRole.SetSessionJoinPolicies([]*types.SessionJoinPolicy{{ + Roles: []string{"*"}, + Kinds: []string{string(types.SSHSessionKind)}, + Modes: []string{string("*")}, + }}) + + return joinTestCase{ + name: "success", + host: hostRole, + sessionKind: types.SSHSessionKind, + participant: SessionAccessContext{ + Username: "participant", + Roles: []types.Role{participantRole}, + }, + expected: true, + } +} + func failRoleJoinTestCase(t *testing.T) joinTestCase { hostRole, err := types.NewRole("host", types.RoleSpecV5{}) require.NoError(t, err) @@ -264,6 +288,7 @@ func failKindJoinTestCase(t *testing.T) joinTestCase { func TestSessionAccessJoin(t *testing.T) { testCases := []joinTestCase{ successJoinTestCase(t), + successGlobJoinTestCase(t), failRoleJoinTestCase(t), failKindJoinTestCase(t), } diff --git a/lib/auth/webauthncli/doc.go 
b/lib/auth/webauthncli/doc.go new file mode 100644 index 0000000000000..7ecca3db48e68 --- /dev/null +++ b/lib/auth/webauthncli/doc.go @@ -0,0 +1,40 @@ +// Copyright 2022 Gravitational, Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package webauthncli provides the client-side implementation for WebAuthn. +// +// There are two separate implementations contained within the package: +// +// * A U2F (aka CTAP1), pure Go implementation, backed by flynn/u2f +// * A FIDO2 (aka CTAP1/CTAP2) implementation, backed by Yubico's libfido2 +// +// High-level API methods prefer the FIDO2 implementation, falling back to U2F +// if the binary isn't compiled with libfido2 support. Note that passwordless +// requires CTAP2. +// +// The FIDO2 implementation is protected by the `libfido2` build tag. In order +// to build FIDO2-enabled code, in addition to setting the build tag (eg, `go +// build -tags=libfido2 ./tool/tsh` or `go test -tags=libfido2 +// ./lib/auth/webauthncli`), you must first have libfido2 installed in your +// system. +// +// To install libfido2 [follow its installation instructions]( +// https://github.com/Yubico/libfido2#installation). See +// [gravitational/go-libfido2](https://github.com/gravitational/go-libfido2#why-fork) +// for additional build options. +// +// Refer to +// [buildbox](https://github.com/gravitational/teleport/blob/master/build.assets/Dockerfile#L10) +// for the library versions used by release binaries. 
+package webauthncli diff --git a/lib/backend/firestore/firestorebk.go b/lib/backend/firestore/firestorebk.go index a051544f64870..3c6ee5fae0430 100644 --- a/lib/backend/firestore/firestorebk.go +++ b/lib/backend/firestore/firestorebk.go @@ -23,6 +23,7 @@ import ( "cloud.google.com/go/firestore" apiv1 "cloud.google.com/go/firestore/apiv1/admin" + "github.com/gravitational/trace/trail" "google.golang.org/api/option" adminpb "google.golang.org/genproto/googleapis/firestore/admin/v1" "google.golang.org/grpc" @@ -201,6 +202,8 @@ const ( idDocProperty = "id" // timeInBetweenIndexCreationStatusChecks timeInBetweenIndexCreationStatusChecks = time.Second * 10 + // commitLimit is the maximum number of writes per commit + commitLimit = 500 ) // GetName is a part of backend API and it returns Firestore backend type @@ -411,23 +414,12 @@ func (b *Backend) GetRange(ctx context.Context, startKey []byte, endKey []byte, // DeleteRange deletes range of items with keys between startKey and endKey func (b *Backend) DeleteRange(ctx context.Context, startKey, endKey []byte) error { - docSnaps, err := b.getRangeDocs(ctx, startKey, endKey, backend.DefaultRangeLimit) + docs, err := b.getRangeDocs(ctx, startKey, endKey, backend.DefaultRangeLimit) if err != nil { return trace.Wrap(err) } - if len(docSnaps) == 0 { - // Nothing to delete. 
- return nil - } - batch := b.svc.Batch() - for _, docSnap := range docSnaps { - batch.Delete(docSnap.Ref) - } - _, err = batch.Commit(ctx) - if err != nil { - return ConvertGRPCError(err) - } - return nil + + return trace.Wrap(b.deleteDocuments(docs)) } // Get returns a single item or not found error @@ -706,23 +698,37 @@ func (b *Backend) purgeExpiredDocuments() error { return b.clientContext.Err() case <-t.C: expiryTime := b.clock.Now().UTC().Unix() - numDeleted := 0 - batch := b.svc.Batch() - docs, _ := b.svc.Collection(b.CollectionName).Where(expiresDocProperty, "<=", expiryTime).Documents(b.clientContext).GetAll() - for _, doc := range docs { - batch.Delete(doc.Ref) - numDeleted++ + docs, err := b.svc.Collection(b.CollectionName).Where(expiresDocProperty, "<=", expiryTime).Documents(b.clientContext).GetAll() + if err != nil { + b.Logger.WithError(trail.FromGRPC(err)).Warn("Failed to get expired documents") + continue } - if numDeleted > 0 { - _, err := batch.Commit(b.clientContext) - if err != nil { - return ConvertGRPCError(err) - } + + if err := b.deleteDocuments(docs); err != nil { + return trace.Wrap(err) } } } } +// deleteDocuments removes documents from firestore in batches to stay within the +// firestore write limits +func (b *Backend) deleteDocuments(docs []*firestore.DocumentSnapshot) error { + for i := 0; i < len(docs); i += commitLimit { + batch := b.svc.Batch() + + for j := 0; j < commitLimit && i+j < len(docs); j++ { + batch.Delete(docs[i+j].Ref) + } + + if _, err := batch.Commit(b.clientContext); err != nil { + return ConvertGRPCError(err) + } + } + + return nil +} + // ConvertGRPCError converts GRPC errors func ConvertGRPCError(err error, args ...interface{}) error { if err == nil { diff --git a/lib/backend/firestore/firestorebk_test.go b/lib/backend/firestore/firestorebk_test.go index 27adec9616838..e3e6e2b113b8a 100644 --- a/lib/backend/firestore/firestorebk_test.go +++ b/lib/backend/firestore/firestorebk_test.go @@ -16,20 +16,32 @@ package 
firestore import ( "context" + "errors" + "fmt" "net" "os" + "strings" "testing" "time" + "cloud.google.com/go/firestore" "github.com/gravitational/teleport/lib/backend" "github.com/gravitational/teleport/lib/backend/test" "github.com/gravitational/teleport/lib/utils" "github.com/gravitational/trace" - "github.com/jonboulle/clockwork" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" + "google.golang.org/api/option" adminpb "google.golang.org/genproto/googleapis/firestore/admin/v1" + firestorepb "google.golang.org/genproto/googleapis/firestore/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) func TestMain(m *testing.M) { @@ -76,7 +88,7 @@ func ensureEmulatorRunning(t *testing.T, cfg map[string]interface{}) { if err != nil { t.Skip("Firestore emulator is not running, start it with: gcloud beta emulators firestore start --host-port=localhost:8618") } - con.Close() + require.NoError(t, con.Close()) } func TestFirestoreDB(t *testing.T) { @@ -118,7 +130,7 @@ func newBackend(t *testing.T, cfg map[string]interface{}) *Backend { uut, err := New(context.Background(), cfg, Options{Clock: clock}) require.NoError(t, err) - t.Cleanup(func() { uut.Close() }) + t.Cleanup(func() { require.NoError(t, uut.Close()) }) return uut } @@ -169,3 +181,138 @@ func TestReadLegacyRecord(t *testing.T) { require.Equal(t, item.ID, got.ID) require.Equal(t, item.Expires, got.Expires) } + +type mockFirestoreServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + firestorepb.FirestoreServer + + reqs []proto.Message + + // If set, Commit returns this error. 
+ commitErr error +} + +func (s *mockFirestoreServer) Commit(ctx context.Context, req *firestorepb.CommitRequest) (*firestorepb.CommitResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + + if len(req.Writes) > commitLimit { + return nil, status.Errorf(codes.InvalidArgument, "too many writes in a transaction") + } + + s.reqs = append(s.reqs, req) + if s.commitErr != nil { + return nil, s.commitErr + } + return &firestorepb.CommitResponse{ + WriteResults: []*firestorepb.WriteResult{{ + UpdateTime: timestamppb.Now(), + }}, + }, nil +} + +func TestDeleteDocuments(t *testing.T) { + t.Parallel() + cases := []struct { + name string + assertion require.ErrorAssertionFunc + responseErr error + commitErr error + documents int + }{ + { + name: "failed to commit", + assertion: require.Error, + commitErr: errors.New("failed to commit documents"), + documents: 1, + }, + { + name: "commit less than limit", + assertion: require.NoError, + documents: commitLimit - 123, + }, + { + name: "commit limit", + assertion: require.NoError, + documents: commitLimit, + }, + { + name: "commit more than limit", + assertion: require.NoError, + documents: (commitLimit * 3) + 173, + }, + } + + for _, tt := range cases { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + docs := make([]*firestore.DocumentSnapshot, 0, tt.documents) + for i := 0; i < tt.documents; i++ { + docs = append(docs, &firestore.DocumentSnapshot{ + Ref: &firestore.DocumentRef{ + Path: fmt.Sprintf("projects/test-project/databases/test-db/documents/test/%d", i+1), + }, + CreateTime: time.Now(), + UpdateTime: time.Now(), + }) + } + + mockFirestore := &mockFirestoreServer{ + commitErr: tt.commitErr, + } + srv := grpc.NewServer() + firestorepb.RegisterFirestoreServer(srv, mockFirestore) + + lis, err := net.Listen("tcp", "localhost:0") + 
require.NoError(t, err) + go func() { require.NoError(t, srv.Serve(lis)) }() + t.Cleanup(srv.Stop) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + + client, err := firestore.NewClient(ctx, "test-project", option.WithGRPCConn(conn)) + require.NoError(t, err) + + b := &Backend{ + svc: client, + Entry: utils.NewLoggerForTests().WithFields(logrus.Fields{trace.Component: BackendName}), + clock: clockwork.NewFakeClock(), + clientContext: ctx, + clientCancel: cancel, + backendConfig: backendConfig{ + Config: Config{ + CollectionName: "test-collection", + }, + }, + } + + tt.assertion(t, b.deleteDocuments(docs)) + + if tt.documents == 0 { + return + } + + var committed int + for _, req := range mockFirestore.reqs { + switch r := req.(type) { + case *firestorepb.CommitRequest: + committed += len(r.Writes) + } + } + + require.Equal(t, tt.documents, committed) + + }) + } + +} diff --git a/lib/bpf/bpf.go b/lib/bpf/bpf.go index ce1a2c1b63c7b..e67b331e25dfb 100644 --- a/lib/bpf/bpf.go +++ b/lib/bpf/bpf.go @@ -37,7 +37,6 @@ import ( "github.com/gravitational/teleport/api/constants" apievents "github.com/gravitational/teleport/api/types/events" controlgroup "github.com/gravitational/teleport/lib/cgroup" - "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/events" "github.com/gravitational/trace" @@ -47,6 +46,10 @@ import ( //go:embed bytecode var embedFS embed.FS +// ArgsCacheSize is the number of args events to store before dropping args +// events. +const ArgsCacheSize = 1024 + // SessionWatch is a map of cgroup IDs that the BPF service is watching and // emitting events for. type SessionWatch struct { @@ -153,7 +156,7 @@ func New(config *Config) (BPF, error) { } // Create args cache used by the exec BPF program. 
- s.argsCache, err = ttlmap.New(defaults.ArgsCacheSize) + s.argsCache, err = ttlmap.New(ArgsCacheSize) if err != nil { return nil, trace.Wrap(err) } diff --git a/lib/client/api.go b/lib/client/api.go index fa50ea4d22836..4489a78b2fafe 100644 --- a/lib/client/api.go +++ b/lib/client/api.go @@ -1968,7 +1968,7 @@ func (tc *TeleportClient) ListAppServersWithFilters(ctx context.Context, customF defer proxyClient.Close() filter := customFilter - if customFilter == nil { + if filter == nil { filter = &proto.ListResourcesRequest{ Namespace: tc.Namespace, Labels: tc.Labels, @@ -2027,7 +2027,7 @@ func (tc *TeleportClient) ListDatabaseServersWithFilters(ctx context.Context, cu defer proxyClient.Close() filter := customFilter - if customFilter == nil { + if filter == nil { filter = &proto.ListResourcesRequest{ Namespace: tc.Namespace, Labels: tc.Labels, @@ -2706,7 +2706,7 @@ func (tc *TeleportClient) localLogin(ctx context.Context, secondFactor constants // directLogin asks for a password + HOTP token, makes a request to CA via proxy func (tc *TeleportClient) directLogin(ctx context.Context, secondFactorType constants.SecondFactorType, pub []byte) (*auth.SSHLoginResponse, error) { - password, err := tc.AskPassword() + password, err := tc.AskPassword(ctx) if err != nil { return nil, trace.Wrap(err) } @@ -2714,7 +2714,7 @@ func (tc *TeleportClient) directLogin(ctx context.Context, secondFactorType cons // Only ask for a second factor if it's enabled. 
var otpToken string if secondFactorType == constants.SecondFactorOTP { - otpToken, err = tc.AskOTP() + otpToken, err = tc.AskOTP(ctx) if err != nil { return nil, trace.Wrap(err) } @@ -2742,7 +2742,7 @@ func (tc *TeleportClient) directLogin(ctx context.Context, secondFactorType cons // mfaLocalLogin asks for a password and performs the challenge-response authentication func (tc *TeleportClient) mfaLocalLogin(ctx context.Context, pub []byte) (*auth.SSHLoginResponse, error) { - password, err := tc.AskPassword() + password, err := tc.AskPassword(ctx) if err != nil { return nil, trace.Wrap(err) } @@ -3241,15 +3241,14 @@ func Username() (string, error) { } // AskOTP prompts the user to enter the OTP token. -func (tc *TeleportClient) AskOTP() (token string, err error) { - return prompt.Password(context.Background(), tc.Stderr, prompt.Stdin(), "Enter your OTP token") +func (tc *TeleportClient) AskOTP(ctx context.Context) (token string, err error) { + return prompt.Password(ctx, tc.Stderr, prompt.Stdin(), "Enter your OTP token") } // AskPassword prompts the user to enter the password -func (tc *TeleportClient) AskPassword() (pwd string, err error) { +func (tc *TeleportClient) AskPassword(ctx context.Context) (pwd string, err error) { return prompt.Password( - context.Background(), tc.Stderr, prompt.Stdin(), - fmt.Sprintf("Enter password for Teleport user %v", tc.Config.Username)) + ctx, tc.Stderr, prompt.Stdin(), fmt.Sprintf("Enter password for Teleport user %v", tc.Config.Username)) } // DELETE IN: 4.1.0 @@ -3324,7 +3323,7 @@ func (tc *TeleportClient) loadTLSConfig() (*tls.Config, error) { // ParseLabelSpec parses a string like 'name=value,"long name"="quoted value"` into a map like // { "name" -> "value", "long name" -> "quoted value" } func ParseLabelSpec(spec string) (map[string]string, error) { - tokens := []string{} + var tokens []string openQuotes := false var tokenStart, assignCount int specLen := len(spec) @@ -3371,11 +3370,11 @@ func ParseLabelSpec(spec string) 
(map[string]string, error) { // allowing a custom delimiter. Defaults to comma delimiter if not defined. func ParseSearchKeywords(spec string, customDelimiter rune) []string { delimiter := customDelimiter - if customDelimiter == 0 { + if delimiter == 0 { delimiter = rune(',') } - tokens := []string{} + var tokens []string openQuotes := false var tokenStart int specLen := len(spec) diff --git a/lib/client/api_test.go b/lib/client/api_test.go index 44c17681603a3..fdf0e2b785ca6 100644 --- a/lib/client/api_test.go +++ b/lib/client/api_test.go @@ -553,57 +553,73 @@ func TestNewClient_UseKeyPrincipals(t *testing.T) { func TestParseSearchKeywords(t *testing.T) { t.Parallel() - expected := [][]string{ - {}, - {"foo"}, - {"foo,bar", "some phrase's", "baz=qux's", "some other phrase", "another one"}, - {"服务器环境=测试,操作系统类别", "Linux", "机房=华北"}, + testCases := []struct { + name string + spec string + expected []string + }{ + { + name: "empty input", + spec: "", + }, + { + name: "simple input", + spec: "foo", + expected: []string{"foo"}, + }, + { + name: "complex input", + spec: `"foo,bar","some phrase's",baz=qux's ,"some other phrase"," another one "`, + expected: []string{"foo,bar", "some phrase's", "baz=qux's", "some other phrase", "another one"}, + }, + { + name: "unicode input", + spec: `"服务器环境=测试,操作系统类别", Linux , 机房=华北 `, + expected: []string{"服务器环境=测试,操作系统类别", "Linux", "机房=华北"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + m := ParseSearchKeywords(tc.spec, ',') + require.Equal(t, tc.expected, m) + }) } + // Test default delimiter (which is a comma) + m := ParseSearchKeywords("foo,bar", rune(0)) + require.Equal(t, []string{"foo", "bar"}, m) +} + +func TestParseSearchKeywords_SpaceDelimiter(t *testing.T) { + t.Parallel() + testCases := []struct { - name string - delimiter rune - specs []string + name string + spec string + expected []string }{ { - name: "with comma delimiter", - delimiter: ',', - specs: []string{ - "", - "foo", - 
`"foo,bar","some phrase's",baz=qux's ,"some other phrase"," another one "`, - `"服务器环境=测试,操作系统类别", Linux , 机房=华北 `, - }, + name: "simple input", + spec: "foo", + expected: []string{"foo"}, }, { - name: "with 0 value delimiter (fallback to comma)", - specs: []string{ - "", - "foo", - `"foo,bar","some phrase's",baz=qux's ,"some other phrase"," another one "`, - `"服务器环境=测试,操作系统类别", Linux , 机房=华北 `, - }, + name: "complex input", + spec: `foo,bar "some phrase's" baz=qux's "some other phrase" " another one "`, + expected: []string{"foo,bar", "some phrase's", "baz=qux's", "some other phrase", "another one"}, }, { - name: "with space delimiter", - delimiter: ' ', - specs: []string{ - "", - "foo", - `foo,bar "some phrase's" baz=qux's "some other phrase" " another one "`, - `服务器环境=测试,操作系统类别 Linux 机房=华北 `, - }, + name: "unicode input", + spec: `服务器环境=测试,操作系统类别 Linux 机房=华北 `, + expected: []string{"服务器环境=测试,操作系统类别", "Linux", "机房=华北"}, }, } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { - t.Parallel() - for i, spec := range tc.specs { - m := ParseSearchKeywords(spec, tc.delimiter) - require.Equal(t, expected[i], m) - } + m := ParseSearchKeywords(tc.spec, ' ') + require.Equal(t, tc.expected, m) }) } } diff --git a/lib/client/client.go b/lib/client/client.go index b245c704bdd22..f6671f83f0622 100644 --- a/lib/client/client.go +++ b/lib/client/client.go @@ -621,7 +621,7 @@ func (proxy *ProxyClient) FindNodesByFilters(ctx context.Context, req proto.List resources, err := client.GetResourcesWithFilters(ctx, site, req) if err != nil { - // ListResources for nodes not availalbe, provide fallback. + // ListResources for nodes not available, provide fallback. // Fallback does not support search/predicate support, so if users // provide them, it does nothing. 
// @@ -654,7 +654,7 @@ func (proxy *ProxyClient) FindAppServersByFilters(ctx context.Context, req proto resources, err := client.GetResourcesWithFilters(ctx, authClient, req) if err != nil { - // ListResources for app servers not availalbe, provide fallback. + // ListResources for app servers not available, provide fallback. // Fallback does not support filters, so if users // provide them, it does nothing. // @@ -729,7 +729,7 @@ func (proxy *ProxyClient) DeleteUserAppSessions(ctx context.Context, req *proto. return nil } -// FindDatabaseServersByFilters returns all registered database proxy servers. +// FindDatabaseServersByFilters returns registered database proxy servers that match the provided filter. func (proxy *ProxyClient) FindDatabaseServersByFilters(ctx context.Context, req proto.ListResourcesRequest) ([]types.DatabaseServer, error) { req.ResourceType = types.KindDatabaseServer authClient, err := proxy.CurrentClusterAccessPoint(ctx, false) @@ -739,7 +739,7 @@ func (proxy *ProxyClient) FindDatabaseServersByFilters(ctx context.Context, req resources, err := client.GetResourcesWithFilters(ctx, authClient, req) if err != nil { - // ListResources for db servers not availalbe, provide fallback. + // ListResources for db servers not available, provide fallback. // Fallback does not support filters, so if users // provide them, it does nothing. // diff --git a/lib/defaults/defaults.go b/lib/defaults/defaults.go index ffc80a74e9742..7e218b7f6f133 100644 --- a/lib/defaults/defaults.go +++ b/lib/defaults/defaults.go @@ -82,30 +82,18 @@ const ( // RDPListenPort is the standard port for RDP servers. RDPListenPort = 3389 - // Default DB to use for persisting state. 
Another options is "etcd" - BackendType = "bolt" - // BackendDir is a default backend subdirectory BackendDir = "backend" // BackendPath is a default backend path parameter BackendPath = "path" - // Name of events bolt database file stored in DataDir - EventsBoltFile = "events.db" - // By default SSH server (and SSH proxy) will bind to this IP BindIP = "0.0.0.0" // By default all users use /bin/bash DefaultShell = "/bin/bash" - // CacheTTL is a default cache TTL for persistent node cache - CacheTTL = 20 * time.Hour - - // RecentCacheTTL is a default cache TTL for recently accessed items - RecentCacheTTL = 2 * time.Second - // InviteTokenTTL sets the lifespan of tokens used for adding nodes and users // to a cluster InviteTokenTTL = 15 * time.Minute @@ -122,9 +110,6 @@ const ( // HTTPIdleTimeout is a default timeout for idle HTTP connections HTTPIdleTimeout = 30 * time.Second - // DefaultThrottleTimeout is a timemout used to throttle failed auth servers - DefaultThrottleTimeout = 10 * time.Second - // WebHeadersTimeout is a timeout that is set for web requests // before browsers raise "Timeout waiting web headers" error in // the browser @@ -199,9 +184,6 @@ const ( // for sync purposes HOTPFirstTokensRange = 4 - // HOTPTokenDigits is the number of digits in each token - HOTPTokenDigits = 6 - // MinPasswordLength is minimum password length MinPasswordLength = 6 @@ -223,9 +205,6 @@ const ( // ActiveSessionTTL is a TTL when session is marked as inactive ActiveSessionTTL = 30 * time.Second - // ActivePartyTTL is a TTL when party is marked as inactive - ActivePartyTTL = 30 * time.Second - // OIDCAuthRequestTTL is TTL of internally stored auth request created by client OIDCAuthRequestTTL = 10 * 60 * time.Second @@ -235,12 +214,8 @@ const ( // GithubAuthRequestTTL is TTL of internally stored Github auth request GithubAuthRequestTTL = 10 * 60 * time.Second - // OAuth2TTL is the default TTL for objects created during OAuth 2.0 flow - // such as web sessions, certificates or 
dynamically created users - OAuth2TTL = 60 * 60 * time.Second // 1 hour - // LogRotationPeriod defines how frequently to rotate the audit log file - LogRotationPeriod = (time.Hour * 24) + LogRotationPeriod = time.Hour * 24 // UploaderScanPeriod is a default uploader scan period UploaderScanPeriod = 5 * time.Second @@ -263,16 +238,6 @@ const ( // AttemptTTL is TTL for login attempt AttemptTTL = time.Minute * 30 - // AuditLogSessions is the default expected amount of concurrent sessions - // supported by Audit logger, this number limits the possible - // amount of simultaneously processes concurrent sessions by the - // Audit log server, and 16K is OK for now - AuditLogSessions = 16384 - - // AccessPointCachedValues is the default maximum amount of cached values - // in access point - AccessPointCachedValues = 16384 - // AuditLogTimeFormat is the format for the timestamp on audit log files. AuditLogTimeFormat = "2006-01-02.15:04:05" @@ -286,9 +251,6 @@ const ( // ClientCacheSize is the size of the RPC clients expiring cache ClientCacheSize = 1024 - // CSRSignTimeout is a default timeout for CSR request to be processed by K8s - CSRSignTimeout = 30 * time.Second - // Localhost is the address of localhost. Used for the default binding // address for port forwarding. Localhost = "127.0.0.1" @@ -393,9 +355,6 @@ var ( // DiskAlertInterval is disk space check interval. DiskAlertInterval = 5 * time.Minute - // TopRequestsCapacity sets up default top requests capacity - TopRequestsCapacity = 128 - // AuthQueueSize is auth service queue size AuthQueueSize = 8192 @@ -426,12 +385,6 @@ var ( // leases are refreshed at a rate of ~1/2 this duration). SessionControlTimeout = time.Minute * 2 - // SPDYPingPeriod is the period for sending out SPDY ping frames on inbound - // and outbound connections. SPDY is used for interactive Kubernetes - // connections. These pings are needed to avoid timeouts on load balancers - // that don't respect TCP keep-alives. 
- SPDYPingPeriod = 30 * time.Second - // AsyncBufferSize is a default buffer size for async emitters AsyncBufferSize = 1024 @@ -563,10 +516,6 @@ const ( // CgroupPath is where the cgroupv2 hierarchy will be mounted. CgroupPath = "/cgroup2" - - // ArgsCacheSize is the number of args events to store before dropping args - // events. - ArgsCacheSize = 1024 ) var ( @@ -580,9 +529,6 @@ var ( // StartRoles is default roles teleport assumes when started via 'start' command StartRoles = []string{RoleProxy, RoleNode, RoleAuthService, RoleApp, RoleDatabase} - // ETCDPrefix is default key in ETCD clustered configurations - ETCDPrefix = "/teleport" - // ConfigEnvar is a name of teleport's configuration environment variable ConfigEnvar = "TELEPORT_CONFIG" @@ -736,10 +682,6 @@ const ( ApplicationTokenAlgorithm = jose.RS256 ) -// WindowsOpenSSHNamedPipe is the address of the named pipe that the -// OpenSSH agent is on. -const WindowsOpenSSHNamedPipe = `\\.\pipe\openssh-ssh-agent` - var ( // FIPSCipherSuites is a list of supported FIPS compliant TLS cipher suites. FIPSCipherSuites = []uint16{ diff --git a/lib/events/complete.go b/lib/events/complete.go index 89c39c9090bab..3ab981c01c883 100644 --- a/lib/events/complete.go +++ b/lib/events/complete.go @@ -51,9 +51,6 @@ type UploadCompleterConfig struct { CheckPeriod time.Duration // Clock is used to override clock in tests Clock clockwork.Clock - // Unstarted does not start automatic goroutine, - // is useful when completer is embedded in another function - Unstarted bool } // CheckAndSetDefaults checks and sets default values @@ -76,23 +73,28 @@ func (cfg *UploadCompleterConfig) CheckAndSetDefaults() error { return nil } -// NewUploadCompleter returns a new instance of the upload completer -// the completer has to be closed to release resources and goroutines -func NewUploadCompleter(cfg UploadCompleterConfig) (*UploadCompleter, error) { +// StartNewUploadCompleter starts an upload completer background process. 
It can +// be closed by closing the provided context. +func StartNewUploadCompleter(ctx context.Context, cfg UploadCompleterConfig) error { + uc, err := newUploadCompleter(cfg) + if err != nil { + return trace.Wrap(err) + } + go uc.start(ctx) + return nil +} + +// newUploadCompleter returns a new instance of the upload completer without +// starting it. Useful in tests. +func newUploadCompleter(cfg UploadCompleterConfig) (*UploadCompleter, error) { if err := cfg.CheckAndSetDefaults(); err != nil { return nil, trace.Wrap(err) } - ctx, cancel := context.WithCancel(context.Background()) u := &UploadCompleter{ cfg: cfg, log: log.WithFields(log.Fields{ trace.Component: teleport.Component(cfg.Component, "completer"), }), - cancel: cancel, - closeCtx: ctx, - } - if !cfg.Unstarted { - go u.run() } return u, nil } @@ -100,13 +102,12 @@ func NewUploadCompleter(cfg UploadCompleterConfig) (*UploadCompleter, error) { // UploadCompleter periodically scans uploads that have not been completed // and completes them type UploadCompleter struct { - cfg UploadCompleterConfig - log *log.Entry - cancel context.CancelFunc - closeCtx context.Context + cfg UploadCompleterConfig + log *log.Entry } -func (u *UploadCompleter) run() { +// start starts a goroutine to periodically check for and complete abandoned uploads +func (u *UploadCompleter) start(ctx context.Context) { periodic := interval.New(interval.Config{ Duration: u.cfg.CheckPeriod, FirstDuration: utils.HalfJitter(u.cfg.CheckPeriod), @@ -117,17 +118,27 @@ func (u *UploadCompleter) run() { for { select { case <-periodic.Next(): - if err := u.CheckUploads(u.closeCtx); err != nil { + if err := u.checkUploads(ctx); err != nil { u.log.WithError(err).Warningf("Failed to check uploads.") } - case <-u.closeCtx.Done(): + case <-ctx.Done(): return } } } -// CheckUploads fetches uploads and completes any abandoned uploads -func (u *UploadCompleter) CheckUploads(ctx context.Context) error { +// checkUploads fetches uploads and completes any 
abandoned uploads +func (u *UploadCompleter) checkUploads(ctx context.Context) error { + trackers, err := u.cfg.SessionTracker.GetActiveSessionTrackers(ctx) + if err != nil { + return trace.Wrap(err) + } + + var activeSessionIDs []string + for _, st := range trackers { + activeSessionIDs = append(activeSessionIDs, st.GetSessionID()) + } + uploads, err := u.cfg.Uploader.ListUploads(ctx) if err != nil { return trace.Wrap(err) @@ -140,14 +151,10 @@ func (u *UploadCompleter) CheckUploads(ctx context.Context) error { } }() + // Complete upload for any uploads without an active session tracker for _, upload := range uploads { - // Check for an active session tracker for the session upload. - _, err := u.cfg.SessionTracker.GetSessionTracker(ctx, upload.SessionID.String()) - if err == nil { - // session appears to be active, don't complete the upload. + if apiutils.SliceContainsStr(activeSessionIDs, upload.SessionID.String()) { continue - } else if !trace.IsNotFound(err) { - return trace.Wrap(err) } parts, err := u.cfg.Uploader.ListParts(ctx, upload) @@ -207,12 +214,6 @@ func (u *UploadCompleter) CheckUploads(ctx context.Context) error { return nil } -// Close closes all outstanding operations without waiting -func (u *UploadCompleter) Close() error { - u.cancel() - return nil -} - func (u *UploadCompleter) ensureSessionEndEvent(ctx context.Context, uploadData UploadMetadata) error { // at this point, we don't know whether we'll need to emit a session.end or a // windows.desktop.session.end, but as soon as we see the session start we'll diff --git a/lib/events/complete_test.go b/lib/events/complete_test.go index 71c9a2ebbdc76..6f13de20ac6a4 100644 --- a/lib/events/complete_test.go +++ b/lib/events/complete_test.go @@ -57,24 +57,24 @@ func TestUploadCompleterCompletesAbandonedUploads(t *testing.T) { MockTrackers: []types.SessionTracker{sessionTracker}, } - uc, err := NewUploadCompleter(UploadCompleterConfig{ - Unstarted: true, + uc, err := 
newUploadCompleter(UploadCompleterConfig{ Uploader: mu, AuditLog: log, SessionTracker: sessionTrackerService, + Clock: clock, }) require.NoError(t, err) upload, err := mu.CreateUpload(context.Background(), sessionID) require.NoError(t, err) - err = uc.CheckUploads(context.Background()) + err = uc.checkUploads(context.Background()) require.NoError(t, err) require.False(t, mu.uploads[upload.ID].completed) clock.Advance(1 * time.Hour) - err = uc.CheckUploads(context.Background()) + err = uc.checkUploads(context.Background()) require.NoError(t, err) require.True(t, mu.uploads[upload.ID].completed) } @@ -99,8 +99,7 @@ func TestUploadCompleterEmitsSessionEnd(t *testing.T) { sessionEvents: []apievents.AuditEvent{test.startEvent}, } - uc, err := NewUploadCompleter(UploadCompleterConfig{ - Unstarted: true, + uc, err := newUploadCompleter(UploadCompleterConfig{ Uploader: mu, AuditLog: log, Clock: clock, @@ -116,7 +115,7 @@ func TestUploadCompleterEmitsSessionEnd(t *testing.T) { _, err = mu.UploadPart(context.Background(), *upload, 0, strings.NewReader("part")) require.NoError(t, err) - err = uc.CheckUploads(context.Background()) + err = uc.checkUploads(context.Background()) require.NoError(t, err) // advance the clock to force the asynchronous session end event emission diff --git a/lib/events/eventstest/mock.go b/lib/events/eventstest/mock.go index 3a19b6585aeb4..7411eaa9161ea 100644 --- a/lib/events/eventstest/mock.go +++ b/lib/events/eventstest/mock.go @@ -99,7 +99,14 @@ type MockSessionTrackerService struct { } func (m *MockSessionTrackerService) GetActiveSessionTrackers(ctx context.Context) ([]types.SessionTracker, error) { - return nil, nil + var trackers []types.SessionTracker + for _, tracker := range m.MockTrackers { + // mock session tracker expiration + if tracker.Expiry().After(m.Clock.Now()) { + trackers = append(trackers, tracker) + } + } + return trackers, nil } func (m *MockSessionTrackerService) GetSessionTracker(ctx context.Context, sessionID string) 
(types.SessionTracker, error) { diff --git a/lib/events/filesessions/fileasync.go b/lib/events/filesessions/fileasync.go index fc05d88074d0f..752e17e2fac9a 100644 --- a/lib/events/filesessions/fileasync.go +++ b/lib/events/filesessions/fileasync.go @@ -102,21 +102,10 @@ func NewUploader(cfg UploaderConfig, sessionTracker services.SessionTrackerServi if err != nil { return nil, trace.Wrap(err) } - // completer scans for uploads that have been initiated, but not completed - // by the client (aborted or crashed) and completes them - uploadCompleter, err := events.NewUploadCompleter(events.UploadCompleterConfig{ - Uploader: handler, - AuditLog: cfg.AuditLog, - Unstarted: true, - SessionTracker: sessionTracker, - }) - if err != nil { - return nil, trace.Wrap(err) - } + ctx, cancel := context.WithCancel(cfg.Context) uploader := &Uploader{ - uploadCompleter: uploadCompleter, - cfg: cfg, + cfg: cfg, log: log.WithFields(log.Fields{ trace.Component: cfg.Component, }), @@ -126,6 +115,19 @@ func NewUploader(cfg UploaderConfig, sessionTracker services.SessionTrackerServi semaphore: make(chan struct{}, cfg.ConcurrentUploads), eventsCh: make(chan events.UploadEvent, cfg.ConcurrentUploads), } + + // upload completer scans for uploads that have been initiated, but not completed + // by the client (aborted or crashed) and completes them. It will be closed once + // the uploader context is closed. 
+ err = events.StartNewUploadCompleter(uploader.ctx, events.UploadCompleterConfig{ + Uploader: handler, + AuditLog: cfg.AuditLog, + SessionTracker: sessionTracker, + }) + if err != nil { + return nil, trace.Wrap(err) + } + return uploader, nil } @@ -145,9 +147,8 @@ func NewUploader(cfg UploaderConfig, sessionTracker services.SessionTrackerServi type Uploader struct { semaphore chan struct{} - cfg UploaderConfig - log *log.Entry - uploadCompleter *events.UploadCompleter + cfg UploaderConfig + log *log.Entry cancel context.CancelFunc ctx context.Context @@ -221,12 +222,6 @@ func (u *Uploader) Serve() error { // Tick at scan period but slow down (and speeds up) on errors. case <-backoff.After(): var failed bool - if err := u.uploadCompleter.CheckUploads(u.ctx); err != nil { - if trace.Unwrap(err) != errContext { - failed = true - u.log.WithError(err).Warningf("Completer scan failed.") - } - } if _, err := u.Scan(); err != nil { if trace.Unwrap(err) != errContext { failed = true @@ -303,9 +298,8 @@ func (u *Uploader) sessionErrorFilePath(sid session.ID) string { } // Close closes all operations -func (u *Uploader) Close() error { +func (u *Uploader) Close() { u.cancel() - return u.uploadCompleter.Close() } type upload struct { diff --git a/lib/events/filesessions/fileasync_test.go b/lib/events/filesessions/fileasync_test.go index e6e219c9d93de..038cb48a063af 100644 --- a/lib/events/filesessions/fileasync_test.go +++ b/lib/events/filesessions/fileasync_test.go @@ -447,9 +447,6 @@ type uploaderPack struct { func (u *uploaderPack) Close(t *testing.T) { u.cancel() - - err := u.uploader.Close() - require.NoError(t, err) } func newUploaderPack(t *testing.T, wrapStreamer wrapStreamerFn) uploaderPack { diff --git a/lib/fixtures/fixtures.go b/lib/fixtures/fixtures.go index cc06fcdd44b8c..daf92b08a7fe8 100644 --- a/lib/fixtures/fixtures.go +++ b/lib/fixtures/fixtures.go @@ -19,9 +19,8 @@ import ( "runtime/debug" "testing" - "github.com/davecgh/go-spew/spew" + 
"github.com/google/go-cmp/cmp" "github.com/gravitational/trace" - "github.com/kylelemons/godebug/diff" check "gopkg.in/check.v1" ) @@ -104,10 +103,8 @@ func AssertConnectionProblem(t *testing.T, err error) { // DeepCompare uses gocheck DeepEquals but provides nice diff if things are not equal func DeepCompare(c *check.C, a, b interface{}) { - d := &spew.ConfigState{Indent: " ", DisableMethods: true, DisablePointerMethods: true, DisablePointerAddresses: true} - if !reflect.DeepEqual(a, b) { - c.Fatalf("Values are not equal\n%v\nStack:\n%v\n", diff.Diff(d.Sdump(a), d.Sdump(b)), string(debug.Stack())) + c.Fatalf("Values are not equal, diff:\n%s\nStack:\n%v\n", cmp.Diff(a, b), string(debug.Stack())) } } diff --git a/lib/kube/proxy/server.go b/lib/kube/proxy/server.go index 0c3cc58ab9160..5ab727efab0c1 100644 --- a/lib/kube/proxy/server.go +++ b/lib/kube/proxy/server.go @@ -156,6 +156,8 @@ func NewTLSServer(cfg TLSServerConfig) (*TLSServer, error) { } } else { log.Debug("No local kube credentials on proxy, will not start kubernetes_service heartbeats") + // Report the component as being ready. + cfg.OnHeartbeat(nil) } return server, nil diff --git a/lib/service/service.go b/lib/service/service.go index 8b59680837bd5..52b8da2fbcc0d 100644 --- a/lib/service/service.go +++ b/lib/service/service.go @@ -1261,10 +1261,9 @@ func (process *TeleportProcess) initAuthService() error { process.setLocalAuth(authServer) // Upload completer is responsible for checking for initiated but abandoned - // session uploads and completing them - var uploadCompleter *events.UploadCompleter + // session uploads and completing them. it will be closed once the process exits. 
if uploadHandler != nil { - uploadCompleter, err = events.NewUploadCompleter(events.UploadCompleterConfig{ + err = events.StartNewUploadCompleter(process.ExitContext(), events.UploadCompleterConfig{ Uploader: uploadHandler, Component: teleport.ComponentAuth, AuditLog: process.auditLog, @@ -1486,9 +1485,6 @@ func (process *TeleportProcess) initAuthService() error { // of the auth server basically never exits. warnOnErr(tlsServer.Close(), log) } - if uploadCompleter != nil { - warnOnErr(uploadCompleter.Close(), log) - } log.Info("Exited.") }) return nil @@ -2111,7 +2107,7 @@ func (process *TeleportProcess) initUploaderService(streamer events.Streamer, au process.OnExit("fileuploader.shutdown", func(payload interface{}) { log.Infof("File uploader is shutting down.") - warnOnErr(fileUploader.Close(), log) + fileUploader.Close() log.Infof("File uploader has shut down.") }) diff --git a/lib/services/authority.go b/lib/services/authority.go index b29da19bc4e4d..088b39c69f3b2 100644 --- a/lib/services/authority.go +++ b/lib/services/authority.go @@ -397,6 +397,18 @@ func UnmarshalCertAuthority(bytes []byte, opts ...MarshalOption) (types.CertAuth if cfg.ID != 0 { ca.SetResourceID(cfg.ID) } + // Correct problems with existing CAs that contain non-UTC times, which + // causes panics when doing a gogoproto Clone; should only ever be + // possible with LastRotated, but we enforce it on all the times anyway. + // See https://github.com/gogo/protobuf/issues/519 . 
+ if ca.Spec.Rotation != nil { + apiutils.UTC(&ca.Spec.Rotation.Started) + apiutils.UTC(&ca.Spec.Rotation.LastRotated) + apiutils.UTC(&ca.Spec.Rotation.Schedule.UpdateClients) + apiutils.UTC(&ca.Spec.Rotation.Schedule.UpdateServers) + apiutils.UTC(&ca.Spec.Rotation.Schedule.Standby) + } + return &ca, nil } diff --git a/lib/services/authority_test.go b/lib/services/authority_test.go index 14f185b3b59ff..9d456e9cab07a 100644 --- a/lib/services/authority_test.go +++ b/lib/services/authority_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package services +package services_test import ( "crypto/x509/pkix" @@ -24,7 +24,10 @@ import ( "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/lib/auth/testauthority" + . "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/tlsca" + "github.com/gravitational/teleport/lib/utils" ) func TestCertPoolFromCertAuthorities(t *testing.T) { @@ -161,3 +164,47 @@ func TestCertAuthorityEquivalence(t *testing.T) { ca1modID.SetResourceID(ca1.GetResourceID() + 1) require.True(t, CertAuthoritiesEquivalent(ca1, ca1modID)) } + +func TestCertAuthorityUTCUnmarshal(t *testing.T) { + t.Parallel() + ta := testauthority.New() + t.Cleanup(ta.Close) + + _, pub, err := ta.GenerateKeyPair("") + require.NoError(t, err) + _, cert, err := tlsca.GenerateSelfSignedCA(pkix.Name{CommonName: "clustername"}, nil, time.Hour) + require.NoError(t, err) + + caLocal, err := types.NewCertAuthority(types.CertAuthoritySpecV2{ + Type: types.HostCA, + ClusterName: "clustername", + ActiveKeys: types.CAKeySet{ + SSH: []*types.SSHKeyPair{{PublicKey: pub}}, + TLS: []*types.TLSKeyPair{{Cert: cert}}, + }, + Rotation: &types.Rotation{ + LastRotated: time.Now().In(time.FixedZone("not UTC", 2*60*60)), + }, + }) + require.NoError(t, err) + // needed for CertAuthoritiesEquivalent, as this will get called 
by + // UnmarshalCertAuthority + require.NoError(t, SyncCertAuthorityKeys(caLocal)) + + _, offset := caLocal.GetRotation().LastRotated.Zone() + require.NotZero(t, offset) + + item, err := utils.FastMarshal(caLocal) + require.NoError(t, err) + require.Contains(t, string(item), "+02:00\"") + caUTC, err := UnmarshalCertAuthority(item) + require.NoError(t, err) + + _, offset = caUTC.GetRotation().LastRotated.Zone() + require.Zero(t, offset) + + // see https://github.com/gogo/protobuf/issues/519 + require.NotPanics(t, func() { caUTC.Clone() }) + + require.True(t, CertAuthoritiesEquivalent(caLocal, caUTC)) +} diff --git a/lib/services/local/sessiontracker.go b/lib/services/local/sessiontracker.go index 514707d55d89a..4a963620fd08b 100644 --- a/lib/services/local/sessiontracker.go +++ b/lib/services/local/sessiontracker.go @@ -247,11 +247,6 @@ func (s *sessionTracker) UpdateSessionTracker(ctx context.Context, req *proto.Up switch update := req.Update.(type) { case *proto.UpdateSessionTrackerRequest_UpdateState: session.SetState(update.UpdateState.State) - if update.UpdateState.State == types.SessionState_SessionStateTerminated { - // Mark session tracker for deletion. 
- session.SetExpiry(s.bk.Clock().Now()) - } - case *proto.UpdateSessionTrackerRequest_AddParticipant: session.AddParticipant(*update.AddParticipant.Participant) case *proto.UpdateSessionTrackerRequest_RemoveParticipant: diff --git a/lib/services/presets.go b/lib/services/presets.go index d2608ac06e959..aee00809ff66e 100644 --- a/lib/services/presets.go +++ b/lib/services/presets.go @@ -116,6 +116,7 @@ func NewPresetAccessRole() types.Role { role.SetWindowsLogins(types.Allow, []string{teleport.TraitInternalWindowsLoginsVariable}) role.SetKubeUsers(types.Allow, []string{teleport.TraitInternalKubeUsersVariable}) role.SetKubeGroups(types.Allow, []string{teleport.TraitInternalKubeGroupsVariable}) + role.SetAWSRoleARNs(types.Allow, []string{teleport.TraitInternalAWSRoleARNs}) return role } diff --git a/lib/services/role.go b/lib/services/role.go index af048969c8e5d..c3fa89ead07a2 100644 --- a/lib/services/role.go +++ b/lib/services/role.go @@ -292,17 +292,7 @@ func ApplyTraits(r types.Role, traits map[string][]string) types.Role { r.SetWindowsLogins(condition, apiutils.Deduplicate(outWindowsLogins)) inRoleARNs := r.GetAWSRoleARNs(condition) - var outRoleARNs []string - for _, arn := range inRoleARNs { - variableValues, err := ApplyValueTraits(arn, traits) - if err != nil { - if !trace.IsNotFound(err) { - log.Debugf("Skipping AWS role ARN %v: %v.", arn, err) - } - continue - } - outRoleARNs = append(outRoleARNs, variableValues...) 
- } + outRoleARNs := applyValueTraitsSlice(inRoleARNs, traits, "AWS role ARN") r.SetAWSRoleARNs(condition, apiutils.Deduplicate(outRoleARNs)) // apply templates to kubernetes groups @@ -454,7 +444,8 @@ func ApplyValueTraits(val string, traits map[string][]string) ([]string, error) switch variable.Name() { case teleport.TraitLogins, teleport.TraitWindowsLogins, teleport.TraitKubeGroups, teleport.TraitKubeUsers, - teleport.TraitDBNames, teleport.TraitDBUsers: + teleport.TraitDBNames, teleport.TraitDBUsers, + teleport.TraitAWSRoleARNs: default: return nil, trace.BadParameter("unsupported variable %q", variable.Name()) } diff --git a/lib/services/role_test.go b/lib/services/role_test.go index 0da19d28f41d2..f3251cba4299b 100644 --- a/lib/services/role_test.go +++ b/lib/services/role_test.go @@ -24,6 +24,7 @@ import ( "testing" "time" + "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/constants" apidefaults "github.com/gravitational/teleport/api/defaults" "github.com/gravitational/teleport/api/types" @@ -1897,21 +1898,23 @@ func TestApplyTraits(t *testing.T) { { comment: "AWS role ARN substitute in allow rule", inTraits: map[string][]string{ - "foo": {"bar"}, + "foo": {"bar"}, + teleport.TraitAWSRoleARNs: {"baz"}, }, allow: rule{ - inRoleARNs: []string{"{{external.foo}}"}, - outRoleARNs: []string{"bar"}, + inRoleARNs: []string{"{{external.foo}}", teleport.TraitInternalAWSRoleARNs}, + outRoleARNs: []string{"bar", "baz"}, }, }, { comment: "AWS role ARN substitute in deny rule", inTraits: map[string][]string{ - "foo": {"bar"}, + "foo": {"bar"}, + teleport.TraitAWSRoleARNs: {"baz"}, }, deny: rule{ - inRoleARNs: []string{"{{external.foo}}"}, - outRoleARNs: []string{"bar"}, + inRoleARNs: []string{"{{external.foo}}", teleport.TraitInternalAWSRoleARNs}, + outRoleARNs: []string{"bar", "baz"}, }, }, { diff --git a/lib/srv/forward/sshserver.go b/lib/srv/forward/sshserver.go index f89f4323553a5..26e9e39620bc8 100644 --- a/lib/srv/forward/sshserver.go +++ 
b/lib/srv/forward/sshserver.go @@ -297,7 +297,7 @@ func New(c ServerConfig) (*Server, error) { s.kexAlgorithms = c.KEXAlgorithms s.macAlgorithms = c.MACAlgorithms - s.sessionRegistry, err = srv.NewSessionRegistry(s, nil) + s.sessionRegistry, err = srv.NewSessionRegistry(s, s.authClient) if err != nil { return nil, trace.Wrap(err) } diff --git a/lib/srv/regular/sshserver_test.go b/lib/srv/regular/sshserver_test.go index 15cf320267b6e..67f9863e01a99 100644 --- a/lib/srv/regular/sshserver_test.go +++ b/lib/srv/regular/sshserver_test.go @@ -1158,7 +1158,7 @@ func TestProxyRoundRobin(t *testing.T) { t.TempDir(), "", utils.NetAddr{}, - nil, + proxyClient, SetProxyMode(reverseTunnelServer, proxyClient), SetSessionServer(proxyClient), SetEmitter(nodeClient), @@ -1285,7 +1285,7 @@ func TestProxyDirectAccess(t *testing.T) { t.TempDir(), "", utils.NetAddr{}, - nil, + proxyClient, SetProxyMode(reverseTunnelServer, proxyClient), SetSessionServer(proxyClient), SetEmitter(nodeClient), @@ -1418,7 +1418,7 @@ func TestLimiter(t *testing.T) { nodeStateDir, "", utils.NetAddr{}, - nil, + nodeClient, SetLimiter(limiter), SetShell("/bin/sh"), SetSessionServer(nodeClient), @@ -1592,13 +1592,13 @@ func TestSessionTracker(t *testing.T) { err = se.Close() require.NoError(t, err) - // Advance server clock to trigger the session to close (after lingering) and - // update the session tracker to expired. We don't know when the linger sleeper - // will start waiting for clock, so we give it a grace period of 5 seconds. - time.Sleep(time.Second * 5) + f.clock.BlockUntil(3) f.clock.Advance(defaults.SessionIdlePeriod) - // once the session is closed, the tracker should expire (not found) + // Once the session is closed, the tracker should be termianted. + // Once the last set expiration is up, the tracker should be delted. 
+ f.clock.Advance(defaults.SessionTrackerTTL) + trackerExpired := func() bool { _, err := f.testSrv.Auth().GetSessionTracker(ctx, tracker.GetSessionID()) return trace.IsNotFound(err) diff --git a/lib/srv/sess.go b/lib/srv/sess.go index abad0c1c8206d..3460ae846a610 100644 --- a/lib/srv/sess.go +++ b/lib/srv/sess.go @@ -92,6 +92,10 @@ func NewSessionRegistry(srv Server, auth auth.ClientI) (*SessionRegistry, error) return nil, trace.BadParameter("session server is required") } + if auth == nil { + return nil, trace.BadParameter("auth client is required") + } + return &SessionRegistry{ log: log.WithFields(log.Fields{ trace.Component: teleport.Component(teleport.ComponentSession, srv.Component()), @@ -372,7 +376,6 @@ func (s *SessionRegistry) leaveSession(party *party) error { // not lingering anymore? someone reconnected? cool then... no need // to die... if !sess.isLingering() { - fmt.Println("Lingering") s.log.Infof("Session %v has become active again.", sess.id) return } @@ -761,8 +764,11 @@ func (s *session) Close() error { } } + // Complete the session recording if s.recorder != nil { - s.recorder.Close(s.serverCtx) + if err := s.recorder.Complete(s.serverCtx); err != nil { + s.log.WithError(err).Warn("Failed to close recorder.") + } } s.stateUpdate.L.Lock() @@ -1701,10 +1707,6 @@ func (p *party) Close() (err error) { } func (s *session) trackerGet() (types.SessionTracker, error) { - if s.registry.auth == nil { - return nil, trace.BadParameter("cannot fetch session without auth service") - } - // get the session from the registry sess, err := s.registry.auth.GetSessionTracker(s.serverCtx, s.id.String()) if err != nil { @@ -1715,10 +1717,6 @@ func (s *session) trackerGet() (types.SessionTracker, error) { } func (s *session) trackerCreate(teleportUser string, policySet []*types.SessionTrackerPolicySet) error { - if s.registry.auth == nil { - return nil - } - s.log.Debug("Creating tracker") initator := &types.Participant{ ID: teleportUser, @@ -1776,10 +1774,6 @@ func 
(s *session) trackerCreate(teleportUser string, policySet []*types.SessionT } func (s *session) trackerAddParticipant(participant *party) error { - if s.registry.auth == nil { - return nil - } - s.log.Debugf("Tracking participant: %v", participant.user) req := &proto.UpdateSessionTrackerRequest{ SessionID: s.id.String(), @@ -1800,10 +1794,6 @@ func (s *session) trackerAddParticipant(participant *party) error { } func (s *session) trackerRemoveParticipant(participantID string) error { - if s.registry.auth == nil { - return nil - } - s.log.Debugf("Not tracking participant: %v", participantID) req := &proto.UpdateSessionTrackerRequest{ SessionID: s.id.String(), @@ -1822,10 +1812,6 @@ func (s *session) trackerUpdateState(state types.SessionState) error { s.state = state s.stateUpdate.Broadcast() - if s.registry.auth == nil { - return nil - } - req := &proto.UpdateSessionTrackerRequest{ SessionID: s.id.String(), Update: &proto.UpdateSessionTrackerRequest_UpdateState{ @@ -1840,10 +1826,6 @@ func (s *session) trackerUpdateState(state types.SessionState) error { } func (s *session) trackerUpdateExpiry(expires time.Time) error { - if s.registry.auth == nil { - return nil - } - req := &proto.UpdateSessionTrackerRequest{ SessionID: s.id.String(), Update: &proto.UpdateSessionTrackerRequest_UpdateExpiry{ diff --git a/lib/teleterm/api/proto/v1/service.proto b/lib/teleterm/api/proto/v1/service.proto index 5c5b45d2d59e1..724dea5110e4d 100644 --- a/lib/teleterm/api/proto/v1/service.proto +++ b/lib/teleterm/api/proto/v1/service.proto @@ -27,7 +27,7 @@ import "v1/app.proto"; import "v1/server.proto"; import "v1/auth_settings.proto"; -// TerminalService desribes teleterm service +// TerminalService describes Teleterm service service TerminalService { // ListRootClusters lists root clusters rpc ListRootClusters(ListClustersRequest) returns (ListClustersResponse); @@ -35,6 +35,8 @@ service TerminalService { rpc ListLeafClusters(ListLeafClustersRequest) returns (ListClustersResponse); // 
ListDatabases lists databases rpc ListDatabases(ListDatabasesRequest) returns (ListDatabasesResponse); + // ListDatabaseUsers lists allowed users for the given database based on the role set. + rpc ListDatabaseUsers(ListDatabaseUsersRequest) returns (ListDatabaseUsersResponse); // ListGateways lists gateways rpc ListGateways(ListGatewaysRequest) returns (ListGatewaysResponse); // ListServers lists servers @@ -117,6 +119,10 @@ message ListLeafClustersRequest { string cluster_uri = 1; } message ListDatabasesResponse { repeated Database databases = 1; } +message ListDatabaseUsersRequest { string db_uri = 1; } + +message ListDatabaseUsersResponse { repeated string users = 1; } + message CreateGatewayRequest { string target_uri = 1; string target_user = 2; diff --git a/lib/teleterm/api/protogen/golang/v1/service.pb.go b/lib/teleterm/api/protogen/golang/v1/service.pb.go index 9a20a4910724e..5bcb7df447286 100644 --- a/lib/teleterm/api/protogen/golang/v1/service.pb.go +++ b/lib/teleterm/api/protogen/golang/v1/service.pb.go @@ -647,6 +647,100 @@ func (x *ListDatabasesResponse) GetDatabases() []*Database { return nil } +type ListDatabaseUsersRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DbUri string `protobuf:"bytes,1,opt,name=db_uri,json=dbUri,proto3" json:"db_uri,omitempty"` +} + +func (x *ListDatabaseUsersRequest) Reset() { + *x = ListDatabaseUsersRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListDatabaseUsersRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListDatabaseUsersRequest) ProtoMessage() {} + +func (x *ListDatabaseUsersRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListDatabaseUsersRequest.ProtoReflect.Descriptor instead. +func (*ListDatabaseUsersRequest) Descriptor() ([]byte, []int) { + return file_v1_service_proto_rawDescGZIP(), []int{12} +} + +func (x *ListDatabaseUsersRequest) GetDbUri() string { + if x != nil { + return x.DbUri + } + return "" +} + +type ListDatabaseUsersResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Users []string `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"` +} + +func (x *ListDatabaseUsersResponse) Reset() { + *x = ListDatabaseUsersResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListDatabaseUsersResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListDatabaseUsersResponse) ProtoMessage() {} + +func (x *ListDatabaseUsersResponse) ProtoReflect() protoreflect.Message { + mi := &file_v1_service_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListDatabaseUsersResponse.ProtoReflect.Descriptor instead. 
+func (*ListDatabaseUsersResponse) Descriptor() ([]byte, []int) { + return file_v1_service_proto_rawDescGZIP(), []int{13} +} + +func (x *ListDatabaseUsersResponse) GetUsers() []string { + if x != nil { + return x.Users + } + return nil +} + type CreateGatewayRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -660,7 +754,7 @@ type CreateGatewayRequest struct { func (x *CreateGatewayRequest) Reset() { *x = CreateGatewayRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_service_proto_msgTypes[12] + mi := &file_v1_service_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -673,7 +767,7 @@ func (x *CreateGatewayRequest) String() string { func (*CreateGatewayRequest) ProtoMessage() {} func (x *CreateGatewayRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_service_proto_msgTypes[12] + mi := &file_v1_service_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -686,7 +780,7 @@ func (x *CreateGatewayRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateGatewayRequest.ProtoReflect.Descriptor instead. 
func (*CreateGatewayRequest) Descriptor() ([]byte, []int) { - return file_v1_service_proto_rawDescGZIP(), []int{12} + return file_v1_service_proto_rawDescGZIP(), []int{14} } func (x *CreateGatewayRequest) GetTargetUri() string { @@ -721,7 +815,7 @@ type ListGatewaysRequest struct { func (x *ListGatewaysRequest) Reset() { *x = ListGatewaysRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_service_proto_msgTypes[13] + mi := &file_v1_service_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -734,7 +828,7 @@ func (x *ListGatewaysRequest) String() string { func (*ListGatewaysRequest) ProtoMessage() {} func (x *ListGatewaysRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_service_proto_msgTypes[13] + mi := &file_v1_service_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -747,7 +841,7 @@ func (x *ListGatewaysRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListGatewaysRequest.ProtoReflect.Descriptor instead. 
func (*ListGatewaysRequest) Descriptor() ([]byte, []int) { - return file_v1_service_proto_rawDescGZIP(), []int{13} + return file_v1_service_proto_rawDescGZIP(), []int{15} } func (x *ListGatewaysRequest) GetClusterIds() []string { @@ -768,7 +862,7 @@ type ListGatewaysResponse struct { func (x *ListGatewaysResponse) Reset() { *x = ListGatewaysResponse{} if protoimpl.UnsafeEnabled { - mi := &file_v1_service_proto_msgTypes[14] + mi := &file_v1_service_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -781,7 +875,7 @@ func (x *ListGatewaysResponse) String() string { func (*ListGatewaysResponse) ProtoMessage() {} func (x *ListGatewaysResponse) ProtoReflect() protoreflect.Message { - mi := &file_v1_service_proto_msgTypes[14] + mi := &file_v1_service_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -794,7 +888,7 @@ func (x *ListGatewaysResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListGatewaysResponse.ProtoReflect.Descriptor instead. 
func (*ListGatewaysResponse) Descriptor() ([]byte, []int) { - return file_v1_service_proto_rawDescGZIP(), []int{14} + return file_v1_service_proto_rawDescGZIP(), []int{16} } func (x *ListGatewaysResponse) GetGateways() []*Gateway { @@ -815,7 +909,7 @@ type RemoveGatewayRequest struct { func (x *RemoveGatewayRequest) Reset() { *x = RemoveGatewayRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_service_proto_msgTypes[15] + mi := &file_v1_service_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -828,7 +922,7 @@ func (x *RemoveGatewayRequest) String() string { func (*RemoveGatewayRequest) ProtoMessage() {} func (x *RemoveGatewayRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_service_proto_msgTypes[15] + mi := &file_v1_service_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -841,7 +935,7 @@ func (x *RemoveGatewayRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoveGatewayRequest.ProtoReflect.Descriptor instead. 
func (*RemoveGatewayRequest) Descriptor() ([]byte, []int) { - return file_v1_service_proto_rawDescGZIP(), []int{15} + return file_v1_service_proto_rawDescGZIP(), []int{17} } func (x *RemoveGatewayRequest) GetGatewayUri() string { @@ -862,7 +956,7 @@ type ListServersRequest struct { func (x *ListServersRequest) Reset() { *x = ListServersRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_service_proto_msgTypes[16] + mi := &file_v1_service_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -875,7 +969,7 @@ func (x *ListServersRequest) String() string { func (*ListServersRequest) ProtoMessage() {} func (x *ListServersRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_service_proto_msgTypes[16] + mi := &file_v1_service_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -888,7 +982,7 @@ func (x *ListServersRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListServersRequest.ProtoReflect.Descriptor instead. 
func (*ListServersRequest) Descriptor() ([]byte, []int) { - return file_v1_service_proto_rawDescGZIP(), []int{16} + return file_v1_service_proto_rawDescGZIP(), []int{18} } func (x *ListServersRequest) GetClusterUri() string { @@ -909,7 +1003,7 @@ type ListServersResponse struct { func (x *ListServersResponse) Reset() { *x = ListServersResponse{} if protoimpl.UnsafeEnabled { - mi := &file_v1_service_proto_msgTypes[17] + mi := &file_v1_service_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -922,7 +1016,7 @@ func (x *ListServersResponse) String() string { func (*ListServersResponse) ProtoMessage() {} func (x *ListServersResponse) ProtoReflect() protoreflect.Message { - mi := &file_v1_service_proto_msgTypes[17] + mi := &file_v1_service_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -935,7 +1029,7 @@ func (x *ListServersResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListServersResponse.ProtoReflect.Descriptor instead. 
func (*ListServersResponse) Descriptor() ([]byte, []int) { - return file_v1_service_proto_rawDescGZIP(), []int{17} + return file_v1_service_proto_rawDescGZIP(), []int{19} } func (x *ListServersResponse) GetServers() []*Server { @@ -956,7 +1050,7 @@ type ListKubesResponse struct { func (x *ListKubesResponse) Reset() { *x = ListKubesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_v1_service_proto_msgTypes[18] + mi := &file_v1_service_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -969,7 +1063,7 @@ func (x *ListKubesResponse) String() string { func (*ListKubesResponse) ProtoMessage() {} func (x *ListKubesResponse) ProtoReflect() protoreflect.Message { - mi := &file_v1_service_proto_msgTypes[18] + mi := &file_v1_service_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -982,7 +1076,7 @@ func (x *ListKubesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListKubesResponse.ProtoReflect.Descriptor instead. 
func (*ListKubesResponse) Descriptor() ([]byte, []int) { - return file_v1_service_proto_rawDescGZIP(), []int{18} + return file_v1_service_proto_rawDescGZIP(), []int{20} } func (x *ListKubesResponse) GetKubes() []*Kube { @@ -1003,7 +1097,7 @@ type ListAppsResponse struct { func (x *ListAppsResponse) Reset() { *x = ListAppsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_v1_service_proto_msgTypes[19] + mi := &file_v1_service_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1016,7 +1110,7 @@ func (x *ListAppsResponse) String() string { func (*ListAppsResponse) ProtoMessage() {} func (x *ListAppsResponse) ProtoReflect() protoreflect.Message { - mi := &file_v1_service_proto_msgTypes[19] + mi := &file_v1_service_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1029,7 +1123,7 @@ func (x *ListAppsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListAppsResponse.ProtoReflect.Descriptor instead. 
func (*ListAppsResponse) Descriptor() ([]byte, []int) { - return file_v1_service_proto_rawDescGZIP(), []int{19} + return file_v1_service_proto_rawDescGZIP(), []int{21} } func (x *ListAppsResponse) GetApps() []*App { @@ -1050,7 +1144,7 @@ type GetAuthSettingsRequest struct { func (x *GetAuthSettingsRequest) Reset() { *x = GetAuthSettingsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_service_proto_msgTypes[20] + mi := &file_v1_service_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1063,7 +1157,7 @@ func (x *GetAuthSettingsRequest) String() string { func (*GetAuthSettingsRequest) ProtoMessage() {} func (x *GetAuthSettingsRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_service_proto_msgTypes[20] + mi := &file_v1_service_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1076,7 +1170,7 @@ func (x *GetAuthSettingsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAuthSettingsRequest.ProtoReflect.Descriptor instead. 
func (*GetAuthSettingsRequest) Descriptor() ([]byte, []int) { - return file_v1_service_proto_rawDescGZIP(), []int{20} + return file_v1_service_proto_rawDescGZIP(), []int{22} } func (x *GetAuthSettingsRequest) GetClusterUri() string { @@ -1095,7 +1189,7 @@ type EmptyResponse struct { func (x *EmptyResponse) Reset() { *x = EmptyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_v1_service_proto_msgTypes[21] + mi := &file_v1_service_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1108,7 +1202,7 @@ func (x *EmptyResponse) String() string { func (*EmptyResponse) ProtoMessage() {} func (x *EmptyResponse) ProtoReflect() protoreflect.Message { - mi := &file_v1_service_proto_msgTypes[21] + mi := &file_v1_service_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1121,7 +1215,7 @@ func (x *EmptyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use EmptyResponse.ProtoReflect.Descriptor instead. 
func (*EmptyResponse) Descriptor() ([]byte, []int) { - return file_v1_service_proto_rawDescGZIP(), []int{21} + return file_v1_service_proto_rawDescGZIP(), []int{23} } // LocalParams describes parameters for local user logins @@ -1141,7 +1235,7 @@ type LoginRequest_LocalParams struct { func (x *LoginRequest_LocalParams) Reset() { *x = LoginRequest_LocalParams{} if protoimpl.UnsafeEnabled { - mi := &file_v1_service_proto_msgTypes[22] + mi := &file_v1_service_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1154,7 +1248,7 @@ func (x *LoginRequest_LocalParams) String() string { func (*LoginRequest_LocalParams) ProtoMessage() {} func (x *LoginRequest_LocalParams) ProtoReflect() protoreflect.Message { - mi := &file_v1_service_proto_msgTypes[22] + mi := &file_v1_service_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1206,7 +1300,7 @@ type LoginRequest_SsoParams struct { func (x *LoginRequest_SsoParams) Reset() { *x = LoginRequest_SsoParams{} if protoimpl.UnsafeEnabled { - mi := &file_v1_service_proto_msgTypes[23] + mi := &file_v1_service_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1219,7 +1313,7 @@ func (x *LoginRequest_SsoParams) String() string { func (*LoginRequest_SsoParams) ProtoMessage() {} func (x *LoginRequest_SsoParams) ProtoReflect() protoreflect.Message { - mi := &file_v1_service_proto_msgTypes[23] + mi := &file_v1_service_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1325,144 +1419,157 @@ var file_v1_service_proto_rawDesc = []byte{ 0x3c, 0x0a, 0x09, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 
0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, - 0x73, 0x65, 0x52, 0x09, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x22, 0x75, 0x0a, - 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, - 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x55, 0x72, 0x69, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x75, - 0x73, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x70, - 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x50, 0x6f, 0x72, 0x74, 0x22, 0x36, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x61, 0x74, 0x65, - 0x77, 0x61, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x51, 0x0a, 0x14, - 0x4c, 0x69, 0x73, 0x74, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x08, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, - 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x61, - 0x74, 0x65, 0x77, 0x61, 0x79, 0x52, 0x08, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x73, 0x22, - 0x37, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x67, 0x61, 0x74, 0x65, 0x77, - 0x61, 0x79, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0a, 0x67, 0x61, - 0x74, 0x65, 0x77, 0x61, 0x79, 0x55, 0x72, 0x69, 0x22, 0x35, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, - 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x55, 0x72, 0x69, 0x22, - 0x4d, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x53, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x22, 0x45, - 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x75, 0x62, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x6b, 0x75, 0x62, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, - 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x75, 0x62, 0x65, 0x52, 0x05, - 0x6b, 0x75, 0x62, 0x65, 0x73, 0x22, 0x41, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x61, 0x70, 0x70, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x41, - 0x70, 0x70, 0x52, 0x04, 0x61, 0x70, 0x70, 0x73, 0x22, 0x39, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x41, - 0x75, 0x74, 0x68, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x75, 
0x72, - 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x55, 0x72, 0x69, 0x22, 0x0f, 0x0a, 0x0d, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xb0, 0x0b, 0x0a, 0x0f, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, - 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x69, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, - 0x52, 0x6f, 0x6f, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x2e, 0x74, + 0x73, 0x65, 0x52, 0x09, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x22, 0x31, 0x0a, + 0x18, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x55, 0x73, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x64, 0x62, 0x5f, + 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x64, 0x62, 0x55, 0x72, 0x69, + 0x22, 0x31, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, + 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x75, 0x73, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x75, 0x73, + 0x65, 0x72, 0x73, 0x22, 0x75, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x47, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x72, 0x69, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x36, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x47, 0x61, 0x74, 
0x65, 0x77, 0x61, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, + 0x64, 0x73, 0x22, 0x51, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x08, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, + 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x52, 0x08, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x73, 0x22, 0x37, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x47, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, + 0x0b, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x55, 0x72, 0x69, 0x22, 0x35, + 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x55, 0x72, 0x69, 0x22, 0x4d, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, + 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x73, 0x22, 0x45, 0x0a, 0x11, 0x4c, 0x69, 0x73, 
0x74, 0x4b, 0x75, 0x62, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x6b, 0x75, 0x62, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, + 0x4b, 0x75, 0x62, 0x65, 0x52, 0x05, 0x6b, 0x75, 0x62, 0x65, 0x73, 0x22, 0x41, 0x0a, 0x10, 0x4c, + 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2d, 0x0a, 0x04, 0x61, 0x70, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, + 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x04, 0x61, 0x70, 0x70, 0x73, 0x22, 0x39, + 0x0a, 0x16, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x55, 0x72, 0x69, 0x22, 0x0f, 0x0a, 0x0d, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xa6, 0x0c, 0x0a, 0x0f, 0x54, + 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x69, + 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x12, 0x29, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, + 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, + 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, + 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6d, 0x0a, 0x10, 0x4c, 0x69, 0x73, + 0x74, 0x4c, 0x65, 0x61, 0x66, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x2e, + 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, + 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x65, 0x61, 0x66, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x6d, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x65, 0x61, 0x66, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x68, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, + 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x12, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x74, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, + 0x73, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 
0x2e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x4c, 0x65, 0x61, 0x66, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, - 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x68, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, - 0x73, 0x65, 0x73, 0x12, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, - 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, - 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2b, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, - 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, - 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x0c, - 0x4c, 0x69, 0x73, 0x74, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x73, 0x12, 0x29, 0x2e, 0x74, - 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, - 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, + 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 
0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x73, 0x12, 0x28, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, - 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x74, - 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, - 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x4b, - 0x75, 0x62, 0x65, 0x73, 0x12, 0x26, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, - 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x4b, 0x75, 0x62, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, - 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, - 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x75, 0x62, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, - 0x73, 0x12, 0x25, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, - 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, + 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, + 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x73, 0x12, 0x29, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x47, 
0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, + 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x62, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x28, + 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x5a, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x75, 0x62, 0x65, 0x73, + 0x12, 0x26, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, + 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x75, 0x62, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x75, 0x62, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x59, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, 0x73, 0x12, 0x25, 0x2e, + 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, + 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, 
0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, + 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x41, 0x70, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0d, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x12, 0x2a, 0x2e, + 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, + 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x47, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x74, 0x65, 0x6c, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, + 0x2e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x12, 0x54, 0x0a, 0x0a, 0x41, 0x64, 0x64, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x27, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, + 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1d, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x60, + 0x0a, 0x0d, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, + 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x74, 0x65, + 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, + 0x76, 0x31, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x60, 
0x0a, 0x0d, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x12, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, - 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x47, - 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, + 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x47, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, - 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x12, 0x54, 0x0a, 0x0a, - 0x41, 0x64, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x27, 0x2e, 0x74, 0x65, 0x6c, + 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x63, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x2c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, + 0x41, 0x75, 0x74, 0x68, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, + 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x54, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x27, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 
0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x50, 0x0a, + 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x22, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, + 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, - 0x31, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, - 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x12, 0x60, 0x0a, 0x0d, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x12, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, - 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, - 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x23, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, - 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x0d, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x47, 0x61, - 0x74, 0x65, 0x77, 0x61, 0x79, 0x12, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x23, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, - 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x52, 0x65, 
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, - 0x68, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x2c, 0x2e, 0x74, 0x65, 0x6c, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x41, - 0x75, 0x74, 0x68, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x54, 0x0a, 0x0a, 0x47, - 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x27, 0x2e, 0x74, 0x65, 0x6c, 0x65, + 0x31, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x52, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x23, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, - 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x12, 0x50, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x22, 0x2e, 0x74, 0x65, 0x6c, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, - 0x31, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, + 0x2e, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x06, 
0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x23, 0x2e, - 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, - 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x74, 0x65, - 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2f, 0x6c, 0x69, 0x62, - 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x72, 0x6d, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x73, 0x65, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x2f, + 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2f, 0x6c, 0x69, 0x62, 0x2f, 0x74, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x72, 0x6d, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1477,81 +1584,85 @@ func file_v1_service_proto_rawDescGZIP() []byte { return file_v1_service_proto_rawDescData } -var file_v1_service_proto_msgTypes = make([]protoimpl.MessageInfo, 24) +var file_v1_service_proto_msgTypes = make([]protoimpl.MessageInfo, 26) var file_v1_service_proto_goTypes = []interface{}{ - (*RemoveClusterRequest)(nil), // 0: teleport.terminal.v1.RemoveClusterRequest - (*GetClusterRequest)(nil), // 1: teleport.terminal.v1.GetClusterRequest - (*LogoutRequest)(nil), // 2: teleport.terminal.v1.LogoutRequest - (*LoginRequest)(nil), // 3: teleport.terminal.v1.LoginRequest - (*AddClusterRequest)(nil), // 4: teleport.terminal.v1.AddClusterRequest - (*ListKubesRequest)(nil), // 5: 
teleport.terminal.v1.ListKubesRequest - (*ListAppsRequest)(nil), // 6: teleport.terminal.v1.ListAppsRequest - (*ListClustersRequest)(nil), // 7: teleport.terminal.v1.ListClustersRequest - (*ListClustersResponse)(nil), // 8: teleport.terminal.v1.ListClustersResponse - (*ListDatabasesRequest)(nil), // 9: teleport.terminal.v1.ListDatabasesRequest - (*ListLeafClustersRequest)(nil), // 10: teleport.terminal.v1.ListLeafClustersRequest - (*ListDatabasesResponse)(nil), // 11: teleport.terminal.v1.ListDatabasesResponse - (*CreateGatewayRequest)(nil), // 12: teleport.terminal.v1.CreateGatewayRequest - (*ListGatewaysRequest)(nil), // 13: teleport.terminal.v1.ListGatewaysRequest - (*ListGatewaysResponse)(nil), // 14: teleport.terminal.v1.ListGatewaysResponse - (*RemoveGatewayRequest)(nil), // 15: teleport.terminal.v1.RemoveGatewayRequest - (*ListServersRequest)(nil), // 16: teleport.terminal.v1.ListServersRequest - (*ListServersResponse)(nil), // 17: teleport.terminal.v1.ListServersResponse - (*ListKubesResponse)(nil), // 18: teleport.terminal.v1.ListKubesResponse - (*ListAppsResponse)(nil), // 19: teleport.terminal.v1.ListAppsResponse - (*GetAuthSettingsRequest)(nil), // 20: teleport.terminal.v1.GetAuthSettingsRequest - (*EmptyResponse)(nil), // 21: teleport.terminal.v1.EmptyResponse - (*LoginRequest_LocalParams)(nil), // 22: teleport.terminal.v1.LoginRequest.LocalParams - (*LoginRequest_SsoParams)(nil), // 23: teleport.terminal.v1.LoginRequest.SsoParams - (*Cluster)(nil), // 24: teleport.terminal.v1.Cluster - (*Database)(nil), // 25: teleport.terminal.v1.Database - (*Gateway)(nil), // 26: teleport.terminal.v1.Gateway - (*Server)(nil), // 27: teleport.terminal.v1.Server - (*Kube)(nil), // 28: teleport.terminal.v1.Kube - (*App)(nil), // 29: teleport.terminal.v1.App - (*AuthSettings)(nil), // 30: teleport.terminal.v1.AuthSettings + (*RemoveClusterRequest)(nil), // 0: teleport.terminal.v1.RemoveClusterRequest + (*GetClusterRequest)(nil), // 1: 
teleport.terminal.v1.GetClusterRequest + (*LogoutRequest)(nil), // 2: teleport.terminal.v1.LogoutRequest + (*LoginRequest)(nil), // 3: teleport.terminal.v1.LoginRequest + (*AddClusterRequest)(nil), // 4: teleport.terminal.v1.AddClusterRequest + (*ListKubesRequest)(nil), // 5: teleport.terminal.v1.ListKubesRequest + (*ListAppsRequest)(nil), // 6: teleport.terminal.v1.ListAppsRequest + (*ListClustersRequest)(nil), // 7: teleport.terminal.v1.ListClustersRequest + (*ListClustersResponse)(nil), // 8: teleport.terminal.v1.ListClustersResponse + (*ListDatabasesRequest)(nil), // 9: teleport.terminal.v1.ListDatabasesRequest + (*ListLeafClustersRequest)(nil), // 10: teleport.terminal.v1.ListLeafClustersRequest + (*ListDatabasesResponse)(nil), // 11: teleport.terminal.v1.ListDatabasesResponse + (*ListDatabaseUsersRequest)(nil), // 12: teleport.terminal.v1.ListDatabaseUsersRequest + (*ListDatabaseUsersResponse)(nil), // 13: teleport.terminal.v1.ListDatabaseUsersResponse + (*CreateGatewayRequest)(nil), // 14: teleport.terminal.v1.CreateGatewayRequest + (*ListGatewaysRequest)(nil), // 15: teleport.terminal.v1.ListGatewaysRequest + (*ListGatewaysResponse)(nil), // 16: teleport.terminal.v1.ListGatewaysResponse + (*RemoveGatewayRequest)(nil), // 17: teleport.terminal.v1.RemoveGatewayRequest + (*ListServersRequest)(nil), // 18: teleport.terminal.v1.ListServersRequest + (*ListServersResponse)(nil), // 19: teleport.terminal.v1.ListServersResponse + (*ListKubesResponse)(nil), // 20: teleport.terminal.v1.ListKubesResponse + (*ListAppsResponse)(nil), // 21: teleport.terminal.v1.ListAppsResponse + (*GetAuthSettingsRequest)(nil), // 22: teleport.terminal.v1.GetAuthSettingsRequest + (*EmptyResponse)(nil), // 23: teleport.terminal.v1.EmptyResponse + (*LoginRequest_LocalParams)(nil), // 24: teleport.terminal.v1.LoginRequest.LocalParams + (*LoginRequest_SsoParams)(nil), // 25: teleport.terminal.v1.LoginRequest.SsoParams + (*Cluster)(nil), // 26: teleport.terminal.v1.Cluster + (*Database)(nil), 
// 27: teleport.terminal.v1.Database + (*Gateway)(nil), // 28: teleport.terminal.v1.Gateway + (*Server)(nil), // 29: teleport.terminal.v1.Server + (*Kube)(nil), // 30: teleport.terminal.v1.Kube + (*App)(nil), // 31: teleport.terminal.v1.App + (*AuthSettings)(nil), // 32: teleport.terminal.v1.AuthSettings } var file_v1_service_proto_depIdxs = []int32{ - 22, // 0: teleport.terminal.v1.LoginRequest.local:type_name -> teleport.terminal.v1.LoginRequest.LocalParams - 23, // 1: teleport.terminal.v1.LoginRequest.sso:type_name -> teleport.terminal.v1.LoginRequest.SsoParams - 24, // 2: teleport.terminal.v1.ListClustersResponse.clusters:type_name -> teleport.terminal.v1.Cluster - 25, // 3: teleport.terminal.v1.ListDatabasesResponse.databases:type_name -> teleport.terminal.v1.Database - 26, // 4: teleport.terminal.v1.ListGatewaysResponse.gateways:type_name -> teleport.terminal.v1.Gateway - 27, // 5: teleport.terminal.v1.ListServersResponse.servers:type_name -> teleport.terminal.v1.Server - 28, // 6: teleport.terminal.v1.ListKubesResponse.kubes:type_name -> teleport.terminal.v1.Kube - 29, // 7: teleport.terminal.v1.ListAppsResponse.apps:type_name -> teleport.terminal.v1.App + 24, // 0: teleport.terminal.v1.LoginRequest.local:type_name -> teleport.terminal.v1.LoginRequest.LocalParams + 25, // 1: teleport.terminal.v1.LoginRequest.sso:type_name -> teleport.terminal.v1.LoginRequest.SsoParams + 26, // 2: teleport.terminal.v1.ListClustersResponse.clusters:type_name -> teleport.terminal.v1.Cluster + 27, // 3: teleport.terminal.v1.ListDatabasesResponse.databases:type_name -> teleport.terminal.v1.Database + 28, // 4: teleport.terminal.v1.ListGatewaysResponse.gateways:type_name -> teleport.terminal.v1.Gateway + 29, // 5: teleport.terminal.v1.ListServersResponse.servers:type_name -> teleport.terminal.v1.Server + 30, // 6: teleport.terminal.v1.ListKubesResponse.kubes:type_name -> teleport.terminal.v1.Kube + 31, // 7: teleport.terminal.v1.ListAppsResponse.apps:type_name -> 
teleport.terminal.v1.App 7, // 8: teleport.terminal.v1.TerminalService.ListRootClusters:input_type -> teleport.terminal.v1.ListClustersRequest 10, // 9: teleport.terminal.v1.TerminalService.ListLeafClusters:input_type -> teleport.terminal.v1.ListLeafClustersRequest 9, // 10: teleport.terminal.v1.TerminalService.ListDatabases:input_type -> teleport.terminal.v1.ListDatabasesRequest - 13, // 11: teleport.terminal.v1.TerminalService.ListGateways:input_type -> teleport.terminal.v1.ListGatewaysRequest - 16, // 12: teleport.terminal.v1.TerminalService.ListServers:input_type -> teleport.terminal.v1.ListServersRequest - 5, // 13: teleport.terminal.v1.TerminalService.ListKubes:input_type -> teleport.terminal.v1.ListKubesRequest - 6, // 14: teleport.terminal.v1.TerminalService.ListApps:input_type -> teleport.terminal.v1.ListAppsRequest - 12, // 15: teleport.terminal.v1.TerminalService.CreateGateway:input_type -> teleport.terminal.v1.CreateGatewayRequest - 4, // 16: teleport.terminal.v1.TerminalService.AddCluster:input_type -> teleport.terminal.v1.AddClusterRequest - 0, // 17: teleport.terminal.v1.TerminalService.RemoveCluster:input_type -> teleport.terminal.v1.RemoveClusterRequest - 15, // 18: teleport.terminal.v1.TerminalService.RemoveGateway:input_type -> teleport.terminal.v1.RemoveGatewayRequest - 20, // 19: teleport.terminal.v1.TerminalService.GetAuthSettings:input_type -> teleport.terminal.v1.GetAuthSettingsRequest - 1, // 20: teleport.terminal.v1.TerminalService.GetCluster:input_type -> teleport.terminal.v1.GetClusterRequest - 3, // 21: teleport.terminal.v1.TerminalService.Login:input_type -> teleport.terminal.v1.LoginRequest - 2, // 22: teleport.terminal.v1.TerminalService.Logout:input_type -> teleport.terminal.v1.LogoutRequest - 8, // 23: teleport.terminal.v1.TerminalService.ListRootClusters:output_type -> teleport.terminal.v1.ListClustersResponse - 8, // 24: teleport.terminal.v1.TerminalService.ListLeafClusters:output_type -> teleport.terminal.v1.ListClustersResponse 
- 11, // 25: teleport.terminal.v1.TerminalService.ListDatabases:output_type -> teleport.terminal.v1.ListDatabasesResponse - 14, // 26: teleport.terminal.v1.TerminalService.ListGateways:output_type -> teleport.terminal.v1.ListGatewaysResponse - 17, // 27: teleport.terminal.v1.TerminalService.ListServers:output_type -> teleport.terminal.v1.ListServersResponse - 18, // 28: teleport.terminal.v1.TerminalService.ListKubes:output_type -> teleport.terminal.v1.ListKubesResponse - 19, // 29: teleport.terminal.v1.TerminalService.ListApps:output_type -> teleport.terminal.v1.ListAppsResponse - 26, // 30: teleport.terminal.v1.TerminalService.CreateGateway:output_type -> teleport.terminal.v1.Gateway - 24, // 31: teleport.terminal.v1.TerminalService.AddCluster:output_type -> teleport.terminal.v1.Cluster - 21, // 32: teleport.terminal.v1.TerminalService.RemoveCluster:output_type -> teleport.terminal.v1.EmptyResponse - 21, // 33: teleport.terminal.v1.TerminalService.RemoveGateway:output_type -> teleport.terminal.v1.EmptyResponse - 30, // 34: teleport.terminal.v1.TerminalService.GetAuthSettings:output_type -> teleport.terminal.v1.AuthSettings - 24, // 35: teleport.terminal.v1.TerminalService.GetCluster:output_type -> teleport.terminal.v1.Cluster - 21, // 36: teleport.terminal.v1.TerminalService.Login:output_type -> teleport.terminal.v1.EmptyResponse - 21, // 37: teleport.terminal.v1.TerminalService.Logout:output_type -> teleport.terminal.v1.EmptyResponse - 23, // [23:38] is the sub-list for method output_type - 8, // [8:23] is the sub-list for method input_type + 12, // 11: teleport.terminal.v1.TerminalService.ListDatabaseUsers:input_type -> teleport.terminal.v1.ListDatabaseUsersRequest + 15, // 12: teleport.terminal.v1.TerminalService.ListGateways:input_type -> teleport.terminal.v1.ListGatewaysRequest + 18, // 13: teleport.terminal.v1.TerminalService.ListServers:input_type -> teleport.terminal.v1.ListServersRequest + 5, // 14: 
teleport.terminal.v1.TerminalService.ListKubes:input_type -> teleport.terminal.v1.ListKubesRequest + 6, // 15: teleport.terminal.v1.TerminalService.ListApps:input_type -> teleport.terminal.v1.ListAppsRequest + 14, // 16: teleport.terminal.v1.TerminalService.CreateGateway:input_type -> teleport.terminal.v1.CreateGatewayRequest + 4, // 17: teleport.terminal.v1.TerminalService.AddCluster:input_type -> teleport.terminal.v1.AddClusterRequest + 0, // 18: teleport.terminal.v1.TerminalService.RemoveCluster:input_type -> teleport.terminal.v1.RemoveClusterRequest + 17, // 19: teleport.terminal.v1.TerminalService.RemoveGateway:input_type -> teleport.terminal.v1.RemoveGatewayRequest + 22, // 20: teleport.terminal.v1.TerminalService.GetAuthSettings:input_type -> teleport.terminal.v1.GetAuthSettingsRequest + 1, // 21: teleport.terminal.v1.TerminalService.GetCluster:input_type -> teleport.terminal.v1.GetClusterRequest + 3, // 22: teleport.terminal.v1.TerminalService.Login:input_type -> teleport.terminal.v1.LoginRequest + 2, // 23: teleport.terminal.v1.TerminalService.Logout:input_type -> teleport.terminal.v1.LogoutRequest + 8, // 24: teleport.terminal.v1.TerminalService.ListRootClusters:output_type -> teleport.terminal.v1.ListClustersResponse + 8, // 25: teleport.terminal.v1.TerminalService.ListLeafClusters:output_type -> teleport.terminal.v1.ListClustersResponse + 11, // 26: teleport.terminal.v1.TerminalService.ListDatabases:output_type -> teleport.terminal.v1.ListDatabasesResponse + 13, // 27: teleport.terminal.v1.TerminalService.ListDatabaseUsers:output_type -> teleport.terminal.v1.ListDatabaseUsersResponse + 16, // 28: teleport.terminal.v1.TerminalService.ListGateways:output_type -> teleport.terminal.v1.ListGatewaysResponse + 19, // 29: teleport.terminal.v1.TerminalService.ListServers:output_type -> teleport.terminal.v1.ListServersResponse + 20, // 30: teleport.terminal.v1.TerminalService.ListKubes:output_type -> teleport.terminal.v1.ListKubesResponse + 21, // 31: 
teleport.terminal.v1.TerminalService.ListApps:output_type -> teleport.terminal.v1.ListAppsResponse + 28, // 32: teleport.terminal.v1.TerminalService.CreateGateway:output_type -> teleport.terminal.v1.Gateway + 26, // 33: teleport.terminal.v1.TerminalService.AddCluster:output_type -> teleport.terminal.v1.Cluster + 23, // 34: teleport.terminal.v1.TerminalService.RemoveCluster:output_type -> teleport.terminal.v1.EmptyResponse + 23, // 35: teleport.terminal.v1.TerminalService.RemoveGateway:output_type -> teleport.terminal.v1.EmptyResponse + 32, // 36: teleport.terminal.v1.TerminalService.GetAuthSettings:output_type -> teleport.terminal.v1.AuthSettings + 26, // 37: teleport.terminal.v1.TerminalService.GetCluster:output_type -> teleport.terminal.v1.Cluster + 23, // 38: teleport.terminal.v1.TerminalService.Login:output_type -> teleport.terminal.v1.EmptyResponse + 23, // 39: teleport.terminal.v1.TerminalService.Logout:output_type -> teleport.terminal.v1.EmptyResponse + 24, // [24:40] is the sub-list for method output_type + 8, // [8:24] is the sub-list for method input_type 8, // [8:8] is the sub-list for extension type_name 8, // [8:8] is the sub-list for extension extendee 0, // [0:8] is the sub-list for field type_name @@ -1715,7 +1826,7 @@ func file_v1_service_proto_init() { } } file_v1_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateGatewayRequest); i { + switch v := v.(*ListDatabaseUsersRequest); i { case 0: return &v.state case 1: @@ -1727,7 +1838,7 @@ func file_v1_service_proto_init() { } } file_v1_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListGatewaysRequest); i { + switch v := v.(*ListDatabaseUsersResponse); i { case 0: return &v.state case 1: @@ -1739,7 +1850,7 @@ func file_v1_service_proto_init() { } } file_v1_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListGatewaysResponse); i { + switch v := 
v.(*CreateGatewayRequest); i { case 0: return &v.state case 1: @@ -1751,7 +1862,7 @@ func file_v1_service_proto_init() { } } file_v1_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveGatewayRequest); i { + switch v := v.(*ListGatewaysRequest); i { case 0: return &v.state case 1: @@ -1763,7 +1874,7 @@ func file_v1_service_proto_init() { } } file_v1_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListServersRequest); i { + switch v := v.(*ListGatewaysResponse); i { case 0: return &v.state case 1: @@ -1775,7 +1886,7 @@ func file_v1_service_proto_init() { } } file_v1_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListServersResponse); i { + switch v := v.(*RemoveGatewayRequest); i { case 0: return &v.state case 1: @@ -1787,7 +1898,7 @@ func file_v1_service_proto_init() { } } file_v1_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListKubesResponse); i { + switch v := v.(*ListServersRequest); i { case 0: return &v.state case 1: @@ -1799,7 +1910,7 @@ func file_v1_service_proto_init() { } } file_v1_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListAppsResponse); i { + switch v := v.(*ListServersResponse); i { case 0: return &v.state case 1: @@ -1811,7 +1922,7 @@ func file_v1_service_proto_init() { } } file_v1_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAuthSettingsRequest); i { + switch v := v.(*ListKubesResponse); i { case 0: return &v.state case 1: @@ -1823,7 +1934,7 @@ func file_v1_service_proto_init() { } } file_v1_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EmptyResponse); i { + switch v := v.(*ListAppsResponse); i { case 0: return &v.state case 1: @@ -1835,7 +1946,7 @@ func file_v1_service_proto_init() { } } 
file_v1_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoginRequest_LocalParams); i { + switch v := v.(*GetAuthSettingsRequest); i { case 0: return &v.state case 1: @@ -1847,6 +1958,30 @@ func file_v1_service_proto_init() { } } file_v1_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EmptyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoginRequest_LocalParams); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LoginRequest_SsoParams); i { case 0: return &v.state @@ -1869,7 +2004,7 @@ func file_v1_service_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_v1_service_proto_rawDesc, NumEnums: 0, - NumMessages: 24, + NumMessages: 26, NumExtensions: 0, NumServices: 1, }, @@ -1901,6 +2036,8 @@ type TerminalServiceClient interface { ListLeafClusters(ctx context.Context, in *ListLeafClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) // ListDatabases lists databases ListDatabases(ctx context.Context, in *ListDatabasesRequest, opts ...grpc.CallOption) (*ListDatabasesResponse, error) + // ListDatabaseUsers lists allowed users for the given database based on the role set. 
+ ListDatabaseUsers(ctx context.Context, in *ListDatabaseUsersRequest, opts ...grpc.CallOption) (*ListDatabaseUsersResponse, error) // ListGateways lists gateways ListGateways(ctx context.Context, in *ListGatewaysRequest, opts ...grpc.CallOption) (*ListGatewaysResponse, error) // ListServers lists servers @@ -1962,6 +2099,15 @@ func (c *terminalServiceClient) ListDatabases(ctx context.Context, in *ListDatab return out, nil } +func (c *terminalServiceClient) ListDatabaseUsers(ctx context.Context, in *ListDatabaseUsersRequest, opts ...grpc.CallOption) (*ListDatabaseUsersResponse, error) { + out := new(ListDatabaseUsersResponse) + err := c.cc.Invoke(ctx, "/teleport.terminal.v1.TerminalService/ListDatabaseUsers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *terminalServiceClient) ListGateways(ctx context.Context, in *ListGatewaysRequest, opts ...grpc.CallOption) (*ListGatewaysResponse, error) { out := new(ListGatewaysResponse) err := c.cc.Invoke(ctx, "/teleport.terminal.v1.TerminalService/ListGateways", in, out, opts...) @@ -2078,6 +2224,8 @@ type TerminalServiceServer interface { ListLeafClusters(context.Context, *ListLeafClustersRequest) (*ListClustersResponse, error) // ListDatabases lists databases ListDatabases(context.Context, *ListDatabasesRequest) (*ListDatabasesResponse, error) + // ListDatabaseUsers lists allowed users for the given database based on the role set. 
+ ListDatabaseUsers(context.Context, *ListDatabaseUsersRequest) (*ListDatabaseUsersResponse, error) // ListGateways lists gateways ListGateways(context.Context, *ListGatewaysRequest) (*ListGatewaysResponse, error) // ListServers lists servers @@ -2117,6 +2265,9 @@ func (*UnimplementedTerminalServiceServer) ListLeafClusters(context.Context, *Li func (*UnimplementedTerminalServiceServer) ListDatabases(context.Context, *ListDatabasesRequest) (*ListDatabasesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListDatabases not implemented") } +func (*UnimplementedTerminalServiceServer) ListDatabaseUsers(context.Context, *ListDatabaseUsersRequest) (*ListDatabaseUsersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListDatabaseUsers not implemented") +} func (*UnimplementedTerminalServiceServer) ListGateways(context.Context, *ListGatewaysRequest) (*ListGatewaysResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListGateways not implemented") } @@ -2212,6 +2363,24 @@ func _TerminalService_ListDatabases_Handler(srv interface{}, ctx context.Context return interceptor(ctx, in, info, handler) } +func _TerminalService_ListDatabaseUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDatabaseUsersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TerminalServiceServer).ListDatabaseUsers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/teleport.terminal.v1.TerminalService/ListDatabaseUsers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TerminalServiceServer).ListDatabaseUsers(ctx, req.(*ListDatabaseUsersRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _TerminalService_ListGateways_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListGatewaysRequest) if err := dec(in); err != nil { @@ -2444,6 +2613,10 @@ var _TerminalService_serviceDesc = grpc.ServiceDesc{ MethodName: "ListDatabases", Handler: _TerminalService_ListDatabases_Handler, }, + { + MethodName: "ListDatabaseUsers", + Handler: _TerminalService_ListDatabaseUsers_Handler, + }, { MethodName: "ListGateways", Handler: _TerminalService_ListGateways_Handler, diff --git a/lib/teleterm/api/protogen/js/v1/service_grpc_pb.d.ts b/lib/teleterm/api/protogen/js/v1/service_grpc_pb.d.ts index 6447780161d4e..73b1cff58e76f 100644 --- a/lib/teleterm/api/protogen/js/v1/service_grpc_pb.d.ts +++ b/lib/teleterm/api/protogen/js/v1/service_grpc_pb.d.ts @@ -19,6 +19,7 @@ interface ITerminalServiceService extends grpc.ServiceDefinition; responseDeserialize: grpc.deserialize; } +interface ITerminalServiceService_IListDatabaseUsers extends grpc.MethodDefinition { + path: "/teleport.terminal.v1.TerminalService/ListDatabaseUsers"; + requestStream: false; + responseStream: false; + requestSerialize: grpc.serialize; + requestDeserialize: grpc.deserialize; + responseSerialize: grpc.serialize; + responseDeserialize: grpc.deserialize; +} interface ITerminalServiceService_IListGateways extends grpc.MethodDefinition { path: "/teleport.terminal.v1.TerminalService/ListGateways"; requestStream: false; @@ -175,6 +185,7 @@ export interface ITerminalServiceServer { listRootClusters: grpc.handleUnaryCall; listLeafClusters: grpc.handleUnaryCall; listDatabases: grpc.handleUnaryCall; + listDatabaseUsers: grpc.handleUnaryCall; listGateways: grpc.handleUnaryCall; listServers: grpc.handleUnaryCall; listKubes: grpc.handleUnaryCall; @@ -199,6 +210,9 @@ export interface ITerminalServiceClient { listDatabases(request: v1_service_pb.ListDatabasesRequest, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListDatabasesResponse) => void): grpc.ClientUnaryCall; listDatabases(request: 
v1_service_pb.ListDatabasesRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListDatabasesResponse) => void): grpc.ClientUnaryCall; listDatabases(request: v1_service_pb.ListDatabasesRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListDatabasesResponse) => void): grpc.ClientUnaryCall; + listDatabaseUsers(request: v1_service_pb.ListDatabaseUsersRequest, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListDatabaseUsersResponse) => void): grpc.ClientUnaryCall; + listDatabaseUsers(request: v1_service_pb.ListDatabaseUsersRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListDatabaseUsersResponse) => void): grpc.ClientUnaryCall; + listDatabaseUsers(request: v1_service_pb.ListDatabaseUsersRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListDatabaseUsersResponse) => void): grpc.ClientUnaryCall; listGateways(request: v1_service_pb.ListGatewaysRequest, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListGatewaysResponse) => void): grpc.ClientUnaryCall; listGateways(request: v1_service_pb.ListGatewaysRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListGatewaysResponse) => void): grpc.ClientUnaryCall; listGateways(request: v1_service_pb.ListGatewaysRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListGatewaysResponse) => void): grpc.ClientUnaryCall; @@ -248,6 +262,9 @@ export class TerminalServiceClient extends grpc.Client implements ITerminalServi public listDatabases(request: v1_service_pb.ListDatabasesRequest, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListDatabasesResponse) => void): grpc.ClientUnaryCall; public listDatabases(request: 
v1_service_pb.ListDatabasesRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListDatabasesResponse) => void): grpc.ClientUnaryCall; public listDatabases(request: v1_service_pb.ListDatabasesRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListDatabasesResponse) => void): grpc.ClientUnaryCall; + public listDatabaseUsers(request: v1_service_pb.ListDatabaseUsersRequest, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListDatabaseUsersResponse) => void): grpc.ClientUnaryCall; + public listDatabaseUsers(request: v1_service_pb.ListDatabaseUsersRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListDatabaseUsersResponse) => void): grpc.ClientUnaryCall; + public listDatabaseUsers(request: v1_service_pb.ListDatabaseUsersRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListDatabaseUsersResponse) => void): grpc.ClientUnaryCall; public listGateways(request: v1_service_pb.ListGatewaysRequest, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListGatewaysResponse) => void): grpc.ClientUnaryCall; public listGateways(request: v1_service_pb.ListGatewaysRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListGatewaysResponse) => void): grpc.ClientUnaryCall; public listGateways(request: v1_service_pb.ListGatewaysRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: v1_service_pb.ListGatewaysResponse) => void): grpc.ClientUnaryCall; diff --git a/lib/teleterm/api/protogen/js/v1/service_grpc_pb.js b/lib/teleterm/api/protogen/js/v1/service_grpc_pb.js index 32469d2d144c3..2c765ff0f5cfd 100644 --- a/lib/teleterm/api/protogen/js/v1/service_grpc_pb.js +++ b/lib/teleterm/api/protogen/js/v1/service_grpc_pb.js @@ 
-159,6 +159,28 @@ function deserialize_teleport_terminal_v1_ListClustersResponse(buffer_arg) { return v1_service_pb.ListClustersResponse.deserializeBinary(new Uint8Array(buffer_arg)); } +function serialize_teleport_terminal_v1_ListDatabaseUsersRequest(arg) { + if (!(arg instanceof v1_service_pb.ListDatabaseUsersRequest)) { + throw new Error('Expected argument of type teleport.terminal.v1.ListDatabaseUsersRequest'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_teleport_terminal_v1_ListDatabaseUsersRequest(buffer_arg) { + return v1_service_pb.ListDatabaseUsersRequest.deserializeBinary(new Uint8Array(buffer_arg)); +} + +function serialize_teleport_terminal_v1_ListDatabaseUsersResponse(arg) { + if (!(arg instanceof v1_service_pb.ListDatabaseUsersResponse)) { + throw new Error('Expected argument of type teleport.terminal.v1.ListDatabaseUsersResponse'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_teleport_terminal_v1_ListDatabaseUsersResponse(buffer_arg) { + return v1_service_pb.ListDatabaseUsersResponse.deserializeBinary(new Uint8Array(buffer_arg)); +} + function serialize_teleport_terminal_v1_ListDatabasesRequest(arg) { if (!(arg instanceof v1_service_pb.ListDatabasesRequest)) { throw new Error('Expected argument of type teleport.terminal.v1.ListDatabasesRequest'); @@ -303,7 +325,7 @@ function deserialize_teleport_terminal_v1_RemoveGatewayRequest(buffer_arg) { } -// TerminalService desribes teleterm service +// TerminalService describes Teleterm service var TerminalServiceService = exports.TerminalServiceService = { // ListRootClusters lists root clusters listRootClusters: { @@ -341,6 +363,18 @@ listDatabases: { responseSerialize: serialize_teleport_terminal_v1_ListDatabasesResponse, responseDeserialize: deserialize_teleport_terminal_v1_ListDatabasesResponse, }, + // ListDatabaseUsers lists allowed users for the given database based on the role set. 
+listDatabaseUsers: { + path: '/teleport.terminal.v1.TerminalService/ListDatabaseUsers', + requestStream: false, + responseStream: false, + requestType: v1_service_pb.ListDatabaseUsersRequest, + responseType: v1_service_pb.ListDatabaseUsersResponse, + requestSerialize: serialize_teleport_terminal_v1_ListDatabaseUsersRequest, + requestDeserialize: deserialize_teleport_terminal_v1_ListDatabaseUsersRequest, + responseSerialize: serialize_teleport_terminal_v1_ListDatabaseUsersResponse, + responseDeserialize: deserialize_teleport_terminal_v1_ListDatabaseUsersResponse, + }, // ListGateways lists gateways listGateways: { path: '/teleport.terminal.v1.TerminalService/ListGateways', diff --git a/lib/teleterm/api/protogen/js/v1/service_pb.d.ts b/lib/teleterm/api/protogen/js/v1/service_pb.d.ts index c5ae913d80e8d..393d64cb097d0 100644 --- a/lib/teleterm/api/protogen/js/v1/service_pb.d.ts +++ b/lib/teleterm/api/protogen/js/v1/service_pb.d.ts @@ -348,6 +348,50 @@ export namespace ListDatabasesResponse { } } +export class ListDatabaseUsersRequest extends jspb.Message { + getDbUri(): string; + setDbUri(value: string): ListDatabaseUsersRequest; + + + serializeBinary(): Uint8Array; + toObject(includeInstance?: boolean): ListDatabaseUsersRequest.AsObject; + static toObject(includeInstance: boolean, msg: ListDatabaseUsersRequest): ListDatabaseUsersRequest.AsObject; + static extensions: {[key: number]: jspb.ExtensionFieldInfo}; + static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; + static serializeBinaryToWriter(message: ListDatabaseUsersRequest, writer: jspb.BinaryWriter): void; + static deserializeBinary(bytes: Uint8Array): ListDatabaseUsersRequest; + static deserializeBinaryFromReader(message: ListDatabaseUsersRequest, reader: jspb.BinaryReader): ListDatabaseUsersRequest; +} + +export namespace ListDatabaseUsersRequest { + export type AsObject = { + dbUri: string, + } +} + +export class ListDatabaseUsersResponse extends jspb.Message { + clearUsersList(): void; 
+ getUsersList(): Array; + setUsersList(value: Array): ListDatabaseUsersResponse; + addUsers(value: string, index?: number): string; + + + serializeBinary(): Uint8Array; + toObject(includeInstance?: boolean): ListDatabaseUsersResponse.AsObject; + static toObject(includeInstance: boolean, msg: ListDatabaseUsersResponse): ListDatabaseUsersResponse.AsObject; + static extensions: {[key: number]: jspb.ExtensionFieldInfo}; + static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; + static serializeBinaryToWriter(message: ListDatabaseUsersResponse, writer: jspb.BinaryWriter): void; + static deserializeBinary(bytes: Uint8Array): ListDatabaseUsersResponse; + static deserializeBinaryFromReader(message: ListDatabaseUsersResponse, reader: jspb.BinaryReader): ListDatabaseUsersResponse; +} + +export namespace ListDatabaseUsersResponse { + export type AsObject = { + usersList: Array, + } +} + export class CreateGatewayRequest extends jspb.Message { getTargetUri(): string; setTargetUri(value: string): CreateGatewayRequest; diff --git a/lib/teleterm/api/protogen/js/v1/service_pb.js b/lib/teleterm/api/protogen/js/v1/service_pb.js index 04996fb1570ca..1053a0ab8b9b9 100644 --- a/lib/teleterm/api/protogen/js/v1/service_pb.js +++ b/lib/teleterm/api/protogen/js/v1/service_pb.js @@ -37,6 +37,8 @@ goog.exportSymbol('proto.teleport.terminal.v1.ListAppsRequest', null, global); goog.exportSymbol('proto.teleport.terminal.v1.ListAppsResponse', null, global); goog.exportSymbol('proto.teleport.terminal.v1.ListClustersRequest', null, global); goog.exportSymbol('proto.teleport.terminal.v1.ListClustersResponse', null, global); +goog.exportSymbol('proto.teleport.terminal.v1.ListDatabaseUsersRequest', null, global); +goog.exportSymbol('proto.teleport.terminal.v1.ListDatabaseUsersResponse', null, global); goog.exportSymbol('proto.teleport.terminal.v1.ListDatabasesRequest', null, global); goog.exportSymbol('proto.teleport.terminal.v1.ListDatabasesResponse', null, global); 
goog.exportSymbol('proto.teleport.terminal.v1.ListGatewaysRequest', null, global); @@ -347,6 +349,48 @@ if (goog.DEBUG && !COMPILED) { */ proto.teleport.terminal.v1.ListDatabasesResponse.displayName = 'proto.teleport.terminal.v1.ListDatabasesResponse'; } +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.teleport.terminal.v1.ListDatabaseUsersRequest = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.teleport.terminal.v1.ListDatabaseUsersRequest, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.teleport.terminal.v1.ListDatabaseUsersRequest.displayName = 'proto.teleport.terminal.v1.ListDatabaseUsersRequest'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. 
+ * @extends {jspb.Message} + * @constructor + */ +proto.teleport.terminal.v1.ListDatabaseUsersResponse = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, proto.teleport.terminal.v1.ListDatabaseUsersResponse.repeatedFields_, null); +}; +goog.inherits(proto.teleport.terminal.v1.ListDatabaseUsersResponse, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.teleport.terminal.v1.ListDatabaseUsersResponse.displayName = 'proto.teleport.terminal.v1.ListDatabaseUsersResponse'; +} /** * Generated by JsPbCodeGenerator. * @param {Array=} opt_data Optional initial data array, typically from a @@ -2629,6 +2673,292 @@ proto.teleport.terminal.v1.ListDatabasesResponse.prototype.clearDatabasesList = +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.teleport.terminal.v1.ListDatabaseUsersRequest.prototype.toObject = function(opt_includeInstance) { + return proto.teleport.terminal.v1.ListDatabaseUsersRequest.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.teleport.terminal.v1.ListDatabaseUsersRequest} msg The msg instance to transform. 
+ * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.teleport.terminal.v1.ListDatabaseUsersRequest.toObject = function(includeInstance, msg) { + var f, obj = { + dbUri: jspb.Message.getFieldWithDefault(msg, 1, "") + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.teleport.terminal.v1.ListDatabaseUsersRequest} + */ +proto.teleport.terminal.v1.ListDatabaseUsersRequest.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.teleport.terminal.v1.ListDatabaseUsersRequest; + return proto.teleport.terminal.v1.ListDatabaseUsersRequest.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.teleport.terminal.v1.ListDatabaseUsersRequest} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.teleport.terminal.v1.ListDatabaseUsersRequest} + */ +proto.teleport.terminal.v1.ListDatabaseUsersRequest.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {string} */ (reader.readString()); + msg.setDbUri(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). 
+ * @return {!Uint8Array} + */ +proto.teleport.terminal.v1.ListDatabaseUsersRequest.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.teleport.terminal.v1.ListDatabaseUsersRequest.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. + * @param {!proto.teleport.terminal.v1.ListDatabaseUsersRequest} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.teleport.terminal.v1.ListDatabaseUsersRequest.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getDbUri(); + if (f.length > 0) { + writer.writeString( + 1, + f + ); + } +}; + + +/** + * optional string db_uri = 1; + * @return {string} + */ +proto.teleport.terminal.v1.ListDatabaseUsersRequest.prototype.getDbUri = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); +}; + + +/** + * @param {string} value + * @return {!proto.teleport.terminal.v1.ListDatabaseUsersRequest} returns this + */ +proto.teleport.terminal.v1.ListDatabaseUsersRequest.prototype.setDbUri = function(value) { + return jspb.Message.setProto3StringField(this, 1, value); +}; + + + +/** + * List of repeated fields within this message type. + * @private {!Array} + * @const + */ +proto.teleport.terminal.v1.ListDatabaseUsersResponse.repeatedFields_ = [1]; + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. 
whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.teleport.terminal.v1.ListDatabaseUsersResponse.prototype.toObject = function(opt_includeInstance) { + return proto.teleport.terminal.v1.ListDatabaseUsersResponse.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.teleport.terminal.v1.ListDatabaseUsersResponse} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.teleport.terminal.v1.ListDatabaseUsersResponse.toObject = function(includeInstance, msg) { + var f, obj = { + usersList: (f = jspb.Message.getRepeatedField(msg, 1)) == null ? undefined : f + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.teleport.terminal.v1.ListDatabaseUsersResponse} + */ +proto.teleport.terminal.v1.ListDatabaseUsersResponse.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.teleport.terminal.v1.ListDatabaseUsersResponse; + return proto.teleport.terminal.v1.ListDatabaseUsersResponse.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.teleport.terminal.v1.ListDatabaseUsersResponse} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. 
+ * @return {!proto.teleport.terminal.v1.ListDatabaseUsersResponse} + */ +proto.teleport.terminal.v1.ListDatabaseUsersResponse.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {string} */ (reader.readString()); + msg.addUsers(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.teleport.terminal.v1.ListDatabaseUsersResponse.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.teleport.terminal.v1.ListDatabaseUsersResponse.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. + * @param {!proto.teleport.terminal.v1.ListDatabaseUsersResponse} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.teleport.terminal.v1.ListDatabaseUsersResponse.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getUsersList(); + if (f.length > 0) { + writer.writeRepeatedString( + 1, + f + ); + } +}; + + +/** + * repeated string users = 1; + * @return {!Array} + */ +proto.teleport.terminal.v1.ListDatabaseUsersResponse.prototype.getUsersList = function() { + return /** @type {!Array} */ (jspb.Message.getRepeatedField(this, 1)); +}; + + +/** + * @param {!Array} value + * @return {!proto.teleport.terminal.v1.ListDatabaseUsersResponse} returns this + */ +proto.teleport.terminal.v1.ListDatabaseUsersResponse.prototype.setUsersList = function(value) { + return jspb.Message.setField(this, 1, value || []); +}; + + +/** + * @param {string} value + * @param {number=} opt_index + * @return 
{!proto.teleport.terminal.v1.ListDatabaseUsersResponse} returns this + */ +proto.teleport.terminal.v1.ListDatabaseUsersResponse.prototype.addUsers = function(value, opt_index) { + return jspb.Message.addToRepeatedField(this, 1, value, opt_index); +}; + + +/** + * Clears the list making it empty but non-null. + * @return {!proto.teleport.terminal.v1.ListDatabaseUsersResponse} returns this + */ +proto.teleport.terminal.v1.ListDatabaseUsersResponse.prototype.clearUsersList = function() { + return this.setUsersList([]); +}; + + + + + if (jspb.Message.GENERATE_TO_OBJECT) { /** * Creates an object representation of this proto. diff --git a/lib/teleterm/apiserver/handler/handler_databases.go b/lib/teleterm/apiserver/handler/handler_databases.go index e89d9a0bfa0b1..07dda73bd0542 100644 --- a/lib/teleterm/apiserver/handler/handler_databases.go +++ b/lib/teleterm/apiserver/handler/handler_databases.go @@ -44,6 +44,27 @@ func (s *Handler) ListDatabases(ctx context.Context, req *api.ListDatabasesReque return response, nil } +// ListDatabaseUsers is used to list database user suggestions when the user is attempting to +// establish a connection to a database through Teleterm. +// +// The list is based on whatever we can deduce from the role set, so it's similar to the behavior of +// `tsh db ls -v`, with the exception that Teleterm is interested only in the allowed usernames. 
+func (s *Handler) ListDatabaseUsers(ctx context.Context, req *api.ListDatabaseUsersRequest) (*api.ListDatabaseUsersResponse, error) { + cluster, err := s.DaemonService.ResolveCluster(req.DbUri) + if err != nil { + return nil, trace.Wrap(err) + } + + dbUsers, err := cluster.GetAllowedDatabaseUsers(ctx, req.DbUri) + if err != nil { + return nil, trace.Wrap(err) + } + + return &api.ListDatabaseUsersResponse{ + Users: dbUsers, + }, nil +} + func newAPIDatabase(db clusters.Database) *api.Database { apiLabels := APILabels{} for name, value := range db.GetAllLabels() { diff --git a/lib/teleterm/clusters/cluster_databases.go b/lib/teleterm/clusters/cluster_databases.go index e02d3c6095830..f35dbaeb96de5 100644 --- a/lib/teleterm/clusters/cluster_databases.go +++ b/lib/teleterm/clusters/cluster_databases.go @@ -25,6 +25,7 @@ import ( "github.com/gravitational/teleport/lib/client" dbprofile "github.com/gravitational/teleport/lib/client/db" libdefaults "github.com/gravitational/teleport/lib/defaults" + "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/teleterm/api/uri" "github.com/gravitational/teleport/lib/tlsca" @@ -121,3 +122,20 @@ func (c *Cluster) ReissueDBCerts(ctx context.Context, user string, db types.Data return nil } + +// GetAllowedDatabaseUsers returns allowed users for the given database based on the role set. 
+func (c *Cluster) GetAllowedDatabaseUsers(ctx context.Context, dbURI string) ([]string, error) { + roleSet, err := services.FetchRoles(c.status.Roles, c.clusterClient, c.status.Traits) + if err != nil { + return nil, trace.Wrap(err) + } + + db, err := c.GetDatabase(ctx, dbURI) + if err != nil { + return nil, trace.Wrap(err) + } + + dbUsers := roleSet.EnumerateDatabaseUsers(db) + + return dbUsers.Allowed(), nil +} diff --git a/lib/utils/agentconn/agent_windows.go b/lib/utils/agentconn/agent_windows.go index 68e2ef6bf94eb..04ac71cfddbeb 100644 --- a/lib/utils/agentconn/agent_windows.go +++ b/lib/utils/agentconn/agent_windows.go @@ -22,18 +22,18 @@ package agentconn import ( "net" - "github.com/gravitational/teleport/lib/defaults" - "github.com/gravitational/trace" "github.com/Microsoft/go-winio" ) +const namedPipe = `\\.\pipe\openssh-ssh-agent` + // Dial creates net.Conn to a SSH agent listening on a Windows named pipe. // This is behind a build flag because winio.DialPipe is only available on // Windows. func Dial(socket string) (net.Conn, error) { - conn, err := winio.DialPipe(defaults.WindowsOpenSSHNamedPipe, nil) + conn, err := winio.DialPipe(namedPipe, nil) if err != nil { return nil, trace.Wrap(err) } diff --git a/lib/utils/fs.go b/lib/utils/fs.go index c1295e5347b97..1b093317e5f00 100644 --- a/lib/utils/fs.go +++ b/lib/utils/fs.go @@ -157,6 +157,7 @@ func StatDir(path string) (os.FileInfo, error) { // getHomeDir returns the home directory based off the OS. 
func getHomeDir() string { + // TODO(zmb3): use os.UserHomeDir instead and remove these constants switch runtime.GOOS { case constants.LinuxOS: return os.Getenv(teleport.EnvHome) diff --git a/lib/web/apiserver.go b/lib/web/apiserver.go index 8527a407b2432..850c4642e47c5 100644 --- a/lib/web/apiserver.go +++ b/lib/web/apiserver.go @@ -1866,13 +1866,21 @@ func (h *Handler) clusterNodesGet(w http.ResponseWriter, r *http.Request, p http if err != nil { return nil, trace.Wrap(err) } - servers, err := clt.GetNodes(r.Context(), apidefaults.Namespace) + + resp, err := listResources(clt, r, types.KindNode) + if err != nil { + return nil, trace.Wrap(err) + } + + servers, err := types.ResourcesWithLabels(resp.Resources).AsServers() if err != nil { return nil, trace.Wrap(err) } return listResourcesGetResponse{ - Items: ui.MakeServers(site.GetName(), servers), + Items: ui.MakeServers(site.GetName(), servers), + StartKey: resp.NextKey, + TotalCount: resp.TotalCount, }, nil } @@ -1894,8 +1902,8 @@ func (h *Handler) siteNodeConnect( r *http.Request, p httprouter.Params, ctx *SessionContext, - site reversetunnel.RemoteSite) (interface{}, error) { - + site reversetunnel.RemoteSite, +) (interface{}, error) { q := r.URL.Query() params := q.Get("params") if params == "" { @@ -2719,13 +2727,27 @@ type ssoRequestParams struct { } func parseSSORequestParams(r *http.Request) (*ssoRequestParams, error) { - query := r.URL.Query() - - clientRedirectURL := query.Get("redirect_url") + // Manually grab the value from query param "redirect_url". + // + // The "redirect_url" param can contain its own query params such as in + // "https://localhost/login?connector_id=github&redirect_url=https://localhost:8080/web/cluster/im-a-cluster-name/nodes?search=tunnel&sort=hostname:asc", + // which would be incorrectly parsed with the standard method. 
+ // For example a call to r.URL.Query().Get("redirect_url") in the example above would return + // "https://localhost:8080/web/cluster/im-a-cluster-name/nodes?search=tunnel", + // as it would take the "&sort=hostname:asc" to be a separate query param. + // + // This logic assumes that anything coming after "redirect_url" is part of + // the redirect URL. + splittedRawQuery := strings.Split(r.URL.RawQuery, "&redirect_url=") + var clientRedirectURL string + if len(splittedRawQuery) > 1 { + clientRedirectURL, _ = url.QueryUnescape(splittedRawQuery[1]) + } if clientRedirectURL == "" { return nil, trace.BadParameter("missing redirect_url query parameter") } + query := r.URL.Query() connectorID := query.Get("connector_id") if connectorID == "" { return nil, trace.BadParameter("missing connector_id query parameter") diff --git a/lib/web/apiserver_test.go b/lib/web/apiserver_test.go index b1684433a90b0..a68b1f3d7e834 100644 --- a/lib/web/apiserver_test.go +++ b/lib/web/apiserver_test.go @@ -240,7 +240,7 @@ func (s *WebSuite) SetUpTest(c *C) { nodeDataDir, "", utils.NetAddr{}, - nil, + nodeClient, regular.SetUUID(nodeID), regular.SetNamespace(apidefaults.Namespace), regular.SetShell("/bin/sh"), @@ -315,7 +315,7 @@ func (s *WebSuite) SetUpTest(c *C) { c.MkDir(), "", utils.NetAddr{}, - nil, + s.proxyClient, regular.SetUUID(proxyID), regular.SetProxyMode(revTunServer, s.proxyClient), regular.SetSessionServer(s.proxyClient), @@ -551,12 +551,12 @@ func (s *WebSuite) TestSAMLSuccess(c *C) { err = s.server.Auth().CreateSAMLConnector(connector) c.Assert(err, IsNil) - s.server.Auth().SetClock(clockwork.NewFakeClockAt(time.Date(2017, 05, 10, 18, 53, 0, 0, time.UTC))) + s.server.Auth().SetClock(clockwork.NewFakeClockAt(time.Date(2017, 5, 10, 18, 53, 0, 0, time.UTC))) clt := s.clientNoRedirects() csrfToken := "2ebcb768d0090ea4368e42880c970b61865c326172a4a2343b645cf5d7f20992" - baseURL, err := url.Parse(clt.Endpoint("webapi", "saml", "sso") + 
`?redirect_url=http://localhost/after&connector_id=` + connector.GetName()) + baseURL, err := url.Parse(clt.Endpoint("webapi", "saml", "sso") + `?connector_id=` + connector.GetName() + `&redirect_url=http://localhost/after`) c.Assert(err, IsNil) req, err := http.NewRequest("GET", baseURL.String(), nil) c.Assert(err, IsNil) @@ -787,6 +787,7 @@ func TestClusterNodesGet(t *testing.T) { nodes := clusterNodesGetResponse{} require.NoError(t, json.Unmarshal(re.Bytes(), &nodes)) require.Len(t, nodes.Items, 1) + require.Equal(t, 1, nodes.TotalCount) // Get nodes using shortcut. re, err = pack.clt.Get(context.Background(), pack.clt.Endpoint("webapi", "sites", currentSiteShortcut, "nodes"), url.Values{}) @@ -2115,7 +2116,8 @@ func TestClusterDatabasesGet(t *testing.T) { require.NoError(t, err) type testResponse struct { - Items []ui.Database `json:"items"` + Items []ui.Database `json:"items"` + TotalCount int `json:"totalCount"` } // No db registered. @@ -2145,6 +2147,7 @@ func TestClusterDatabasesGet(t *testing.T) { resp = testResponse{} require.NoError(t, json.Unmarshal(re.Bytes(), &resp)) require.Len(t, resp.Items, 1) + require.Equal(t, 1, resp.TotalCount) require.EqualValues(t, ui.Database{ Name: "test-db-name", Desc: "test-description", @@ -2165,7 +2168,8 @@ func TestClusterKubesGet(t *testing.T) { require.NoError(t, err) type testResponse struct { - Items []ui.Kube `json:"items"` + Items []ui.KubeCluster `json:"items"` + TotalCount int `json:"totalCount"` } // No kube registered. 
@@ -2200,12 +2204,70 @@ func TestClusterKubesGet(t *testing.T) { resp = testResponse{} require.NoError(t, json.Unmarshal(re.Bytes(), &resp)) require.Len(t, resp.Items, 1) - require.EqualValues(t, ui.Kube{ + require.Equal(t, 1, resp.TotalCount) + require.EqualValues(t, ui.KubeCluster{ Name: "test-kube-name", Labels: []ui.Label{{Name: "test-field", Value: "test-value"}}, }, resp.Items[0]) } +func TestClusterAppsGet(t *testing.T) { + env := newWebPack(t, 1) + + proxy := env.proxies[0] + pack := proxy.authPack(t, "test-user@example.com") + + type testResponse struct { + Items []ui.App `json:"items"` + TotalCount int `json:"totalCount"` + } + + resource := &types.AppServerV3{ + Metadata: types.Metadata{Name: "test-app"}, + Kind: types.KindAppServer, + Version: types.V2, + Spec: types.AppServerSpecV3{ + HostID: "hostid", + App: &types.AppV3{ + Metadata: types.Metadata{ + Name: "name", + Description: "description", + Labels: map[string]string{"test-field": "test-value"}, + }, + Spec: types.AppSpecV3{ + URI: "https://console.aws.amazon.com", // sets field awsConsole to true + PublicAddr: "publicaddrs", + }, + }, + }, + } + + // Register a app service. + _, err := env.server.Auth().UpsertApplicationServer(context.Background(), resource) + require.NoError(t, err) + + // Make the call. + endpoint := pack.clt.Endpoint("webapi", "sites", env.server.ClusterName(), "apps") + re, err := pack.clt.Get(context.Background(), endpoint, url.Values{}) + require.NoError(t, err) + + // Test correct response. 
+ resp := testResponse{} + require.NoError(t, json.Unmarshal(re.Bytes(), &resp)) + require.Len(t, resp.Items, 1) + require.Equal(t, 1, resp.TotalCount) + require.EqualValues(t, ui.App{ + Name: resource.Spec.App.GetName(), + Description: resource.Spec.App.GetDescription(), + URI: resource.Spec.App.GetURI(), + PublicAddr: resource.Spec.App.GetPublicAddr(), + Labels: []ui.Label{{Name: "test-field", Value: "test-value"}}, + FQDN: resource.Spec.App.GetPublicAddr(), + ClusterID: env.server.ClusterName(), + AWSConsole: true, + }, resp.Items[0]) +} + // TestApplicationAccessDisabled makes sure application access can be disabled // via modules. func TestApplicationAccessDisabled(t *testing.T) { @@ -3053,6 +3115,74 @@ func TestChangeUserAuthentication_recoveryCodesReturnedForCloud(t *testing.T) { require.NotEmpty(t, re.Recovery.Created) } +func TestParseSSORequestParams(t *testing.T) { + t.Parallel() + + token := "someMeaninglessTokenString" + + tests := []struct { + name, url string + wantErr bool + expected *ssoRequestParams + }{ + { + name: "preserve redirect's query params (escaped)", + url: "https://localhost/login?connector_id=oidc&redirect_url=https:%2F%2Flocalhost:8080%2Fweb%2Fcluster%2Fim-a-cluster-name%2Fnodes%3Fsearch=tunnel&sort=hostname:asc", + expected: &ssoRequestParams{ + clientRedirectURL: "https://localhost:8080/web/cluster/im-a-cluster-name/nodes?search=tunnel&sort=hostname:asc", + connectorID: "oidc", + csrfToken: token, + }, + }, + { + name: "preserve redirect's query params (unescaped)", + url: "https://localhost/login?connector_id=github&redirect_url=https://localhost:8080/web/cluster/im-a-cluster-name/nodes?search=tunnel&sort=hostname:asc", + expected: &ssoRequestParams{ + clientRedirectURL: "https://localhost:8080/web/cluster/im-a-cluster-name/nodes?search=tunnel&sort=hostname:asc", + connectorID: "github", + csrfToken: token, + }, + }, + { + name: "preserve various encoded chars", + url: 
"https://localhost/login?connector_id=saml&redirect_url=https:%2F%2Flocalhost:8080%2Fweb%2Fcluster%2Fim-a-cluster-name%2Fapps%3Fquery=search(%2522watermelon%2522%252C%2520%2522this%2522)%2520%2526%2526%2520labels%255B%2522unique-id%2522%255D%2520%253D%253D%2520%2522hi%2522&sort=name:asc", + expected: &ssoRequestParams{ + clientRedirectURL: "https://localhost:8080/web/cluster/im-a-cluster-name/apps?query=search(%22watermelon%22%2C%20%22this%22)%20%26%26%20labels%5B%22unique-id%22%5D%20%3D%3D%20%22hi%22&sort=name:asc", + connectorID: "saml", + csrfToken: token, + }, + }, + { + name: "invalid redirect_url query param", + url: "https://localhost/login?redirect=https://localhost/nodes&connector_id=oidc", + wantErr: true, + }, + { + name: "invalid connector_id query param", + url: "https://localhost/login?redirect_url=https://localhost/nodes&connector=oidc", + wantErr: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + req, err := http.NewRequest("", tc.url, nil) + require.NoError(t, err) + addCSRFCookieToReq(req, token) + + params, err := parseSSORequestParams(req) + + switch { + case tc.wantErr: + require.Error(t, err) + default: + require.NoError(t, err) + require.Equal(t, tc.expected, params) + } + }) + } +} + type authProviderMock struct { server types.ServerV2 } @@ -3446,7 +3576,7 @@ func newWebPack(t *testing.T, numProxies int) *webPack { nodeDataDir, "", utils.NetAddr{}, - nil, + nodeClient, regular.SetUUID(nodeID), regular.SetNamespace(apidefaults.Namespace), regular.SetShell("/bin/sh"), @@ -3491,8 +3621,8 @@ func newWebPack(t *testing.T, numProxies int) *webPack { } func createProxy(ctx context.Context, t *testing.T, proxyID string, node *regular.Server, authServer *auth.TestTLSServer, - hostSigners []ssh.Signer, clock clockwork.FakeClock) *proxy { - + hostSigners []ssh.Signer, clock clockwork.FakeClock, +) *proxy { // create reverse tunnel service: client, err := authServer.NewClient(auth.TestIdentity{ I: auth.BuiltinRole{ @@ 
-3551,7 +3681,7 @@ func createProxy(ctx context.Context, t *testing.T, proxyID string, node *regula t.TempDir(), "", utils.NetAddr{}, - nil, + client, regular.SetUUID(proxyID), regular.SetProxyMode(revTunServer, client), regular.SetSessionServer(client), diff --git a/lib/web/apps.go b/lib/web/apps.go index 4972fb01cf357..3a77d15dbf75b 100644 --- a/lib/web/apps.go +++ b/lib/web/apps.go @@ -53,7 +53,12 @@ func (h *Handler) clusterAppsGet(w http.ResponseWriter, r *http.Request, p httpr return nil, trace.Wrap(err) } - appServers, err := clt.GetApplicationServers(r.Context(), apidefaults.Namespace) + resp, err := listResources(clt, r, types.KindAppServer) + if err != nil { + return nil, trace.Wrap(err) + } + + appServers, err := types.ResourcesWithLabels(resp.Resources).AsAppServers() if err != nil { return nil, trace.Wrap(err) } @@ -71,6 +76,8 @@ func (h *Handler) clusterAppsGet(w http.ResponseWriter, r *http.Request, p httpr Identity: identity, Apps: types.DeduplicateApps(apps), }), + StartKey: resp.NextKey, + TotalCount: resp.TotalCount, }, nil } diff --git a/lib/web/assets.go b/lib/web/assets.go index a020624986026..3628693bec88b 100644 --- a/lib/web/assets.go +++ b/lib/web/assets.go @@ -26,8 +26,6 @@ import ( "strings" "github.com/gravitational/trace" - - "github.com/kardianos/osext" ) // NewDebugFileSystem returns the HTTP file system implementation rooted @@ -35,7 +33,7 @@ import ( func NewDebugFileSystem(assetsPath string) (http.FileSystem, error) { assetsToCheck := []string{"index.html", "/app"} if assetsPath == "" { - exePath, err := osext.ExecutableFolder() + exePath, err := executableFolder() if err != nil { return nil, trace.Wrap(err) } @@ -62,6 +60,14 @@ func NewDebugFileSystem(assetsPath string) (http.FileSystem, error) { return http.Dir(assetsPath), nil } +func executableFolder() (string, error) { + p, err := os.Executable() + if err != nil { + return "", trace.Wrap(err) + } + return filepath.Dir(filepath.Clean(p)), nil +} + const ( webAssetsMissingError 
= "the teleport binary was built without web assets, try building with `make release`" webAssetsReadError = "failure reading web assets from the binary" diff --git a/lib/web/resources.go b/lib/web/resources.go index 37c30e6fe4f80..41f81bcfcbc5e 100644 --- a/lib/web/resources.go +++ b/lib/web/resources.go @@ -21,7 +21,10 @@ import ( "net/http" "strings" + "github.com/gravitational/teleport/api/client/proto" "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/lib/client" + "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/httplib" "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/web/ui" @@ -288,6 +291,44 @@ func ExtractResourceAndValidate(yaml string) (*services.UnknownResource, error) return &unknownRes, nil } +// listResources gets a list of resources depending on the type of resource. +func listResources(clt resourcesAPIGetter, r *http.Request, resourceKind string) (*types.ListResourcesResponse, error) { + values := r.URL.Query() + + limit, err := queryLimit(values, "limit", defaults.MaxIterationLimit) + if err != nil { + return nil, trace.Wrap(err) + } + + // Sort is expected in format `:` where + // index 0 is fieldName and index 1 is direction. + // If a direction is not set, or is not recognized, it defaults to ASC. 
+ var sortBy types.SortBy + sortParam := values.Get("sort") + if sortParam != "" { + vals := strings.Split(sortParam, ":") + if vals[0] != "" { + sortBy.Field = vals[0] + if len(vals) > 1 && vals[1] == "desc" { + sortBy.IsDesc = true + } + } + } + + startKey := values.Get("startKey") + req := proto.ListResourcesRequest{ + ResourceType: resourceKind, + Limit: int32(limit), + StartKey: startKey, + NeedTotalCount: startKey == "", + SortBy: sortBy, + PredicateExpression: values.Get("query"), + SearchKeywords: client.ParseSearchKeywords(values.Get("search"), ' '), + } + + return clt.ListResources(r.Context(), req) +} + type listResourcesGetResponse struct { // Items is a list of resources retrieved. Items interface{} `json:"items"` @@ -321,4 +362,6 @@ type resourcesAPIGetter interface { GetTrustedClusters(ctx context.Context) ([]types.TrustedCluster, error) // DeleteTrustedCluster removes a TrustedCluster from the backend by name. DeleteTrustedCluster(ctx context.Context, name string) error + // ListResoures returns a paginated list of resources. + ListResources(ctx context.Context, req proto.ListResourcesRequest) (*types.ListResourcesResponse, error) } diff --git a/lib/web/resources_test.go b/lib/web/resources_test.go index 929008e0d4387..9e34e65fd2397 100644 --- a/lib/web/resources_test.go +++ b/lib/web/resources_test.go @@ -18,9 +18,12 @@ package web import ( "context" + "net/http" "testing" + "github.com/gravitational/teleport/api/client/proto" "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/web/ui" "github.com/gravitational/trace" @@ -331,6 +334,99 @@ version: v2` require.Contains(t, tc.Content, "name: test-goodcontent") } +func TestListResources(t *testing.T) { + t.Parallel() + + // Test parsing query params. 
+ testCases := []struct { + name, url string + wantBadParamErr bool + expected proto.ListResourcesRequest + }{ + { + name: "decode complex query correctly", + url: "https://dev:3080/login?query=(labels%5B%60%22test%22%60%5D%20%3D%3D%20%22%2B%3A'%2C%23*~%25%5E%22%20%26%26%20!exists(labels.tier))%20%7C%7C%20resource.spec.description%20!%3D%20%22weird%20example%20https%3A%2F%2Ffoo.dev%3A3080%3Fbar%3Da%2Cb%26baz%3Dbanana%22", + expected: proto.ListResourcesRequest{ + ResourceType: types.KindNode, + Limit: defaults.MaxIterationLimit, + NeedTotalCount: true, + PredicateExpression: "(labels[`\"test\"`] == \"+:',#*~%^\" && !exists(labels.tier)) || resource.spec.description != \"weird example https://foo.dev:3080?bar=a,b&baz=banana\"", + }, + }, + { + name: "all param defined and set", + url: `https://dev:3080/login?query=labels.env%20%3D%3D%20%22prod%22&limit=50&startKey=banana&sort=foo:desc&search=foo%2Bbar+baz+foo%2Cbar+%22some%20phrase%22`, + expected: proto.ListResourcesRequest{ + ResourceType: types.KindNode, + Limit: 50, + StartKey: "banana", + SearchKeywords: []string{"foo+bar", "baz", "foo,bar", "some phrase"}, + PredicateExpression: `labels.env == "prod"`, + SortBy: types.SortBy{Field: "foo", IsDesc: true}, + }, + }, + { + name: "all query param defined but empty", + url: `https://dev:3080/login?query=&startKey=&search=&sort=&limit=&startKey=`, + expected: proto.ListResourcesRequest{ + ResourceType: types.KindNode, + Limit: defaults.MaxIterationLimit, + NeedTotalCount: true, + }, + }, + { + name: "sort partially defined: fieldName", + url: `https://dev:3080/login?sort=foo`, + expected: proto.ListResourcesRequest{ + ResourceType: types.KindNode, + Limit: defaults.MaxIterationLimit, + SortBy: types.SortBy{Field: "foo", IsDesc: false}, + NeedTotalCount: true, + }, + }, + { + name: "sort partially defined: fieldName with colon", + url: `https://dev:3080/login?sort=foo:`, + expected: proto.ListResourcesRequest{ + ResourceType: types.KindNode, + Limit: 
defaults.MaxIterationLimit, + SortBy: types.SortBy{Field: "foo", IsDesc: false}, + NeedTotalCount: true, + }, + }, + { + name: "invalid limit value", + wantBadParamErr: true, + url: `https://dev:3080/login?limit=12invalid`, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + httpReq, err := http.NewRequest("", tc.url, nil) + require.NoError(t, err) + + m := &mockedResourceAPIGetter{} + m.mockListResources = func(ctx context.Context, req proto.ListResourcesRequest) (*types.ListResourcesResponse, error) { + if !tc.wantBadParamErr { + require.Equal(t, tc.expected, req) + } + return nil, nil + } + + _, err = listResources(m, httpReq, types.KindNode) + if tc.wantBadParamErr { + require.True(t, trace.IsBadParameter(err)) + } else { + require.NoError(t, err) + } + }) + } +} + type mockedResourceAPIGetter struct { mockGetRole func(ctx context.Context, name string) (types.Role, error) mockGetRoles func(ctx context.Context) ([]types.Role, error) @@ -343,6 +439,7 @@ type mockedResourceAPIGetter struct { mockGetTrustedCluster func(ctx context.Context, name string) (types.TrustedCluster, error) mockGetTrustedClusters func(ctx context.Context) ([]types.TrustedCluster, error) mockDeleteTrustedCluster func(ctx context.Context, name string) error + mockListResources func(ctx context.Context, req proto.ListResourcesRequest) (*types.ListResourcesResponse, error) } func (m *mockedResourceAPIGetter) GetRole(ctx context.Context, name string) (types.Role, error) { @@ -430,3 +527,11 @@ func (m *mockedResourceAPIGetter) DeleteTrustedCluster(ctx context.Context, name return trace.NotImplemented("mockDeleteTrustedCluster not implemented") } + +func (m *mockedResourceAPIGetter) ListResources(ctx context.Context, req proto.ListResourcesRequest) (*types.ListResourcesResponse, error) { + if m.mockListResources != nil { + return m.mockListResources(ctx, req) + } + + return nil, trace.NotImplemented("mockListResources not implemented") +} 
diff --git a/lib/web/servers.go b/lib/web/servers.go index ad10a058b4b79..3e8c34d8825d3 100644 --- a/lib/web/servers.go +++ b/lib/web/servers.go @@ -19,7 +19,6 @@ package web import ( "net/http" - apidefaults "github.com/gravitational/teleport/api/defaults" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/reversetunnel" "github.com/gravitational/teleport/lib/web/ui" @@ -35,14 +34,20 @@ func (h *Handler) clusterKubesGet(w http.ResponseWriter, r *http.Request, p http return nil, trace.Wrap(err) } - // Get a list of kube servers. - kubeServers, err := clt.GetKubeServices(r.Context()) + resp, err := listResources(clt, r, types.KindKubernetesCluster) + if err != nil { + return nil, trace.Wrap(err) + } + + clusters, err := types.ResourcesWithLabels(resp.Resources).AsKubeClusters() if err != nil { return nil, trace.Wrap(err) } return listResourcesGetResponse{ - Items: ui.MakeKubes(h.auth.clusterName, kubeServers), + Items: ui.MakeKubeClusters(clusters), + StartKey: resp.NextKey, + TotalCount: resp.TotalCount, }, nil } @@ -53,8 +58,12 @@ func (h *Handler) clusterDatabasesGet(w http.ResponseWriter, r *http.Request, p return nil, trace.Wrap(err) } - // Get a list of database servers. 
- servers, err := clt.GetDatabaseServers(r.Context(), apidefaults.Namespace) + resp, err := listResources(clt, r, types.KindDatabaseServer) + if err != nil { + return nil, trace.Wrap(err) + } + + servers, err := types.ResourcesWithLabels(resp.Resources).AsDatabaseServers() if err != nil { return nil, trace.Wrap(err) } @@ -66,7 +75,9 @@ func (h *Handler) clusterDatabasesGet(w http.ResponseWriter, r *http.Request, p } return listResourcesGetResponse{ - Items: ui.MakeDatabases(h.auth.clusterName, types.DeduplicateDatabases(databases)), + Items: ui.MakeDatabases(h.auth.clusterName, types.DeduplicateDatabases(databases)), + StartKey: resp.NextKey, + TotalCount: resp.TotalCount, }, nil } @@ -77,14 +88,21 @@ func (h *Handler) clusterDesktopsGet(w http.ResponseWriter, r *http.Request, p h return nil, trace.Wrap(err) } - windowsDesktops, err := clt.GetWindowsDesktops(r.Context(), types.WindowsDesktopFilter{}) + resp, err := listResources(clt, r, types.KindWindowsDesktop) + if err != nil { + return nil, trace.Wrap(err) + } + + windowsDesktops, err := types.ResourcesWithLabels(resp.Resources).AsWindowsDesktops() if err != nil { return nil, trace.Wrap(err) } windowsDesktops = types.DeduplicateDesktops(windowsDesktops) return listResourcesGetResponse{ - Items: ui.MakeDesktops(windowsDesktops), + Items: ui.MakeDesktops(windowsDesktops), + StartKey: resp.NextKey, + TotalCount: resp.TotalCount, }, nil } diff --git a/lib/web/ui/server.go b/lib/web/ui/server.go index 72f3c7ebfc86d..144f9c6aec2f3 100644 --- a/lib/web/ui/server.go +++ b/lib/web/ui/server.go @@ -21,9 +21,9 @@ import ( "strconv" "strings" - "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/constants" "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/lib/defaults" ) // Label describes label for webapp @@ -101,8 +101,8 @@ func MakeServers(clusterName string, servers []types.Server) []Server { return uiServers } -// Kube describes a kube cluster. 
-type Kube struct { +// KubeCluster describes a kube cluster. +type KubeCluster struct { // Name is the name of the kube cluster. Name string `json:"name"` // Labels is a map of static and dynamic labels associated with an kube cluster. @@ -110,29 +110,21 @@ type Kube struct { } // MakeKubes creates ui kube objects and returns a list.. -func MakeKubes(clusterName string, servers []types.Server) []Kube { - kubeClusters := map[string]*types.KubernetesCluster{} - - // Get unique kube clusters - for _, server := range servers { - // Process each kube cluster. - for _, cluster := range server.GetKubernetesClusters() { - kubeClusters[cluster.Name] = cluster - } - } - - uiKubeClusters := make([]Kube, 0, len(kubeClusters)) - for _, cluster := range kubeClusters { - uiLabels := []Label{} - - for name, value := range cluster.StaticLabels { +func MakeKubeClusters(clusters []types.KubeCluster) []KubeCluster { + uiKubeClusters := make([]KubeCluster, 0, len(clusters)) + for _, cluster := range clusters { + staticLabels := cluster.GetStaticLabels() + dynamicLabels := cluster.GetDynamicLabels() + uiLabels := make([]Label, 0, len(staticLabels)+len(dynamicLabels)) + + for name, value := range staticLabels { uiLabels = append(uiLabels, Label{ Name: name, Value: value, }) } - for name, cmd := range cluster.DynamicLabels { + for name, cmd := range dynamicLabels { uiLabels = append(uiLabels, Label{ Name: name, Value: cmd.GetResult(), @@ -141,8 +133,8 @@ func MakeKubes(clusterName string, servers []types.Server) []Kube { sort.Sort(sortedLabels(uiLabels)) - uiKubeClusters = append(uiKubeClusters, Kube{ - Name: cluster.Name, + uiKubeClusters = append(uiKubeClusters, KubeCluster{ + Name: cluster.GetName(), Labels: uiLabels, }) } @@ -215,7 +207,7 @@ func MakeDesktop(windowsDesktop types.WindowsDesktop) Desktop { // stripRdpPort strips the default rdp port from an ip address since it is unimportant to display stripRdpPort := func(addr string) string { splitAddr := strings.Split(addr, ":") - 
if len(splitAddr) > 1 && splitAddr[1] == strconv.Itoa(teleport.StandardRDPPort) { + if len(splitAddr) > 1 && splitAddr[1] == strconv.Itoa(defaults.RDPListenPort) { return splitAddr[0] } return addr diff --git a/rfd/0069-proxy-peering.md b/rfd/0069-proxy-peering.md new file mode 100644 index 0000000000000..a9c6c0526910e --- /dev/null +++ b/rfd/0069-proxy-peering.md @@ -0,0 +1,244 @@ +--- +authors: David Boslee (david@goteleport.com), Naji Obeid (naji@goteleport.com) +state: draft +--- + +# RFD 69 - Proxy Peering + +## What +This document describes an API that enables a proxy to dial the nodes connected to a peer proxy. This is an optional feature that will allow node agents to connect to a single proxy and be reachable through any other proxy in the cluster. + +## Why +Currently node agents dialing over a reverse tunnel are required to connect to every proxy instance in the cluster. This allows a client to connect to a node through any proxy but it also causes other problems when running proxies behind a load balancer like in our cloud environment. These problems include: + +- Ephemeral port exhaustion between a NAT gateway and load balancer. This limits the number of nodes that can be connected behind a single NAT gateway. As more proxies are added fewer nodes are able to connect. +- Thundering herd when adding, removing, or restarting a proxy. Node agents retry connecting until they randomly get balanced to the desired proxy. The more node agents connected the worse this problem becomes. + +Both these issues are mitigated by changing the node agent behavior to connect to a single proxy. Ephemeral port exhaustion is no longer tied to the number of proxies and node agents no longer need to retry until they connect to a specific proxy. + +## Terminology +**User-Proxy** - The proxy a user establishes a connection to. + +**Node-Proxy** - The proxy a node establishes a reverse tunnel to. 
+ +## Details + +### Proxy API + +The following gRPC service will be added to proxy servers: + +```protobuf +syntax = "proto3"; + +package api; + +service ProxyService { rpc DialNode(stream Frame) returns (stream Frame); } + +// Frame wraps different message types to be sent over a stream. +message Frame { + oneof Message { + DialRequest DialRequest = 1; + Data Data = 2; + } +} + +// DialRequest contains details for connecting to a node. +message DialRequest { + // NodeID is the {UUID}.{ClusterName} of the node to connect to. + string NodeID = 1; + // TunnelType is the type of service being accessed. This differentiates agents that + // create multiple reverse tunnels for different services. + string TunnelType = 2 [ (gogoproto.casttype) = "github.com/gravitational/teleport/api/types.TunnelType" ]; + // Source is the original source address of the client. + Addr Source = 3; + // Destination is the destination address to connect to over the reverse tunnel. + Addr Destination = 4; +} + +message Addr { + // Network is the name of the network transport. + string Network = 1; + // String is the string form of the address. + string String = 2; +} + +// Data contains the raw bytes of a connection. +message Data { bytes Bytes = 1; } +``` + +### How it works + +The following diagram shows a user connecting to a proxy, the user-proxy, and trying to reach a node connected to a different proxy, the node-proxy. Using the DialNode rpc, the user-proxy can create a bidirectional stream to the node through the node-proxy. +``` +┌────┐ ┌──────────┐ +|user|──────connection────────>|user-proxy| +└────┘ └────╥─────┘ + ║ + grpc stream + ║ +┌────┐ ┌────╨─────┐ +|node|─────reverse-tunnel─────>|node-proxy| +└────┘ └──────────┘ +``` + +A call to the DialNode rpc will send an initial frame containing a `DialRequest`. All subsequent frames should contain `Data` messages. Failure scenarios are outlines [here](#failure-scenarios). 
+
+To avoid duplicate work the user-proxy will handle all the typical proxy side logic like authorization and session recording, while the node-proxy will forward the connection directly to the node.
+
+The DialNode rpc will be wrapped with a client library to return a net.Conn when called. This abstraction allows teleport to treat any underlying transport the same, whether it be a direct dial to the node, a reverse tunnel connected to the user-proxy, or a connection over the DialNode rpc.
+
+```go
+type ProxyClient interface {
+	DialNode(
+		proxyIDs []string,
+		nodeID string,
+		src net.Addr,
+		dst net.Addr,
+		tunnelType types.TunnelType,
+	) (net.Conn, error)
+}
+```
+
+### Security
+The api will use mTLS to ensure that only other proxies are able to connect. This is done by checking certificates for the built-in role “Proxy”. This will prevent users from connecting to the service directly without going through the user-proxy logic of authorization and session recording.
+
+### Enabling Proxy Peering
+This feature will need to be explicitly configured to use it. The configuration will be set in the auth_service section of the teleport config and will update the `ClusterNetworkingConfig` stored in the backend.
+
+The configuration option will be called `tunnel_strategy`. This will take a `type` and each `type` can support its own custom parameters. This gives us flexibility to support future strategies. The default will be `type: agent_mesh` which is equivalent to the current node dialing behavior.
+
+The new behavior will be `type: proxy_peering` and will have an optional parameter `agent_connection_count` that configures the number of reverse tunnel connections each agent will create. By default the `agent_connection_count` will be 1.
+
+The teleport config:
+```yaml
+auth_service:
+  ...
+  tunnel_strategy:
+    type: proxy_peering
+    agent_connection_count: 2
+  ...
+```
+
+The `ClusterNetworkingConfig`:
+```proto
+message ClusterNetworkingConfigSpecV2 {
+  ...
+ TunnelStrategyV1 TunnelStrategy = 9 [ (gogoproto.jsontag) = "tunnel_strategy,omitempty" ]; + ... +} + +// TunnelStrategyV1 defines possible tunnel strategy types. +message TunnelStrategyV1 { + oneof Strategy { + AgentMeshTunnelStrategy AgentMesh = 1 [ (gogoproto.jsontag) = "agent_mesh,omitempty" ]; + ProxyPeeringTunnelStrategy ProxyPeering = 2 + [ (gogoproto.jsontag) = "proxy_peering,omitempty" ]; + } +} + +// AgentMeshTunnelStrategy requires reverse tunnels to dial every proxy. +message AgentMeshTunnelStrategy {} + +// ProxyPeeringTunnelStrategy requires reverse tunnels to dial a fixed number of proxies. +message ProxyPeeringTunnelStrategy { + int64 AgentConnectionCount = 1 [ + (gogoproto.jsontag) = "agent_connection_count,omitempty", + (gogoproto.moretags) = "yaml:\"agent_connection_count,omitempty\"" + ]; +} + +``` + +### Peer Address Configuration +The peer address is the address the `ProxyService` GRPC API will be exposed on. This will be configured under proxy_service in the configuration file. If the address is unspecified (`0.0.0.0`) then an address will be discovered using the `GuessHostIP` function in [lib/utils/addr.go](https://github.com/gravitational/teleport/blob/56c536e61f4b52c011b7d18dfaaf2b2c9ecac1cc/lib/utils/addr.go#L281). During startup the proxy will check the `ClusterNetworkingConfig` to see if the `proxy_peering` tunnel strategy is configured before starting the `ProxyService`. A default value of `0.0.0.0:3021` will be used. +```yaml +proxy_service: + ... + peer_listen_addr: 0.0.0.0:3021 + ... +``` +This address will be added to the [ServerSpecV2](https://github.com/gravitational/teleport/blob/95c53ad90e68887778db8141238fee494028bbdf/api/types/types.proto#L364) and stored in the backend. 
+```protobuf +string PeerAddr = 11 [ (gogoproto.jsontag) = "peer_addr,omitempty" ]; +``` +### Agent Proxy Relationship + +The ID of the proxy an agent is connected to will be added to the [ServerSpecV2](https://github.com/gravitational/teleport/blob/95c53ad90e68887778db8141238fee494028bbdf/api/types/types.proto#L364) along with a Nonce and NonceID to mitigate out of order updates. +```protobuf +string ProxyID = 12 [ (gogoproto.jsontag) = "proxy_id,omitempty" ]; +int64 Nonce = 13 [ (gogoproto.jsontag) = "nonce,omitempty" ]; +int64 NonceID = 14 [ (gogoproto.jsontag) = "nonce_id,omitempty" ]; +``` + +Since each proxy already keeps a cache of `Servers` there will be no additional mechanism required to replicate this information. + +Each agent will be responsible for updating the `ProxyID` as it connects and reconnects to proxy servers. This will be done over the existing periodic heartbeats to the auth server. If the `proxy_peering` tunnel strategy is not configured in the `ClusterNetworkingConfig` the `ProxyID` should not be included. + +The `Nonce` will start at 0 and be incremented with each update sent to the auth server. On each restart of the teleport agent a new `NonceID` will be randomly generated. The auth server will reject any updates where the `heartbeat.nonce < previous.nonce && heartbeat.nonce_id == previous.nonce_id`. + +### API Clients +Each proxy will need to manage multiple grpc clients, one for each peer proxy. Client connections will be created as peer proxies are discovered. Similar to the agent pools current behavior, clients can be expired if the connection fails and the peer hasn't heartbeated to the backend for a certain amount of time. + +Transient connection failures can be detected using [GRPC keepalives](https://pkg.go.dev/google.golang.org/grpc/keepalive) along with the client [`WaitForStateChange`](https://pkg.go.dev/google.golang.org/grpc#ClientConn.WaitForStateChange) API. 
The time it takes to detect a dead connection is determined by the keepalive `Timeout` parameter. The grpc client will automatically try to reconnect with an exponential backoff policy. + +For future backwards compatibility the proxy teleport version will be included in the grpc client/server headers. This will allow either a client or server to downgrade messages accordingly. + +Metrics will be added so we can monitor whether a single grpc connection becomes a bottleneck for many concurrent streams. The following metrics will be tracked: + +1. Total throughput in bytes, this is the aggregate number of bytes sent over all grpc channels on a single connection. +2. Number of concurrent streams, this is the number of streams at any instant. +3. Total number of streams, this is the total number of streams including both current and past streams. + +With these metrics we will be able to see if throughput begins to flatten as more streams are being used. If this does become an issue additional tcp connections will need to be created. + +### Reverse Tunnel Agent Pool +Changes to the reverse tunnel agent and agent pool are required to support this design. The existing implementation creates a connection to every proxy. The new impelementation will decide how many connections to create dynamically based on the `ClusterNetworkingConfig`. If the `proxy_peering` tunnel strategy is configured the agent will try to create the configured number of connections. If the `agent_mesh` tunnel strategy is configured then a connection to every proxy will be created. Old agents can continue connecting to every proxy regardless of the tunnel strategy. + +As mentioned above the `proxy_peering` tunnel strategy will have a default `agent_connection_count: 1`. This is more likedly to lead to unavailability to a subset of agents during network partitions and cluster upgrades. 
To help minimize this a higher `agent_connection_count` can be configured to increase the likelihood that an agent is reachable during these events.
+
+The `proxy_peering` strategy with a fixed `agent_connection_count` is an improvement over the `agent_mesh` strategy as it allows proxy servers to scale up without impacting the number of connections agents maintain.
+
+### Trusted Clusters
+Leaf clusters will continue connecting to all proxies in the root cluster. Supporting trusted clusters would add a non-trivial amount of work and complexity to this design and would provide diminishing returns. It is expected that trusted clusters will not be connected at the same scale as other resources like ssh nodes and therefore will not be a big contributor to the problems we are trying to address here.
+
+### Cluster Upgrade
+Upgrading a cluster to support this feature will require reconfiguring the auth service as follows:
+ +3 and 4 will have the same behavior as a node agent disconnecting unexpectedly with the current implementation. This results in an [ExitMissingError](https://pkg.go.dev/golang.org/x/crypto/ssh#ExitMissingError) being displayed client side. + +### TLS Routing +Load balancers between the agent and proxy servers may want to diffentiate between old agents that need to connect to every proxy and the new agents described in this document. This is important for geo distributed deployments to ensure low latency routing. + +The cluster must be configure with `proxy_listener_mode: multiplex` to enable TLS ALPN routing. New agents will add an additional protocol `teleport-reversetunnelv2` to the ALPN header field resulting in the following list: `["teleport-reversetunnelv2", "teleport-reversetunnel"]`. + +Preserving `teleport-reversetunnel` in the list of protocols, ensures that new agents are able to connect to proxies running an older version of teleport. + +## Alternative Considerations + +### Node Tracker +The orginal proposal included a separate service for tracking which proxy each node was connected to. This was ultimately decided against. The service was proposed to target scalability goals that need to be addressed in other parts of the system first. Given these limitations a simpler design was chosen to benefit the most customers. Further discussions on the node tracker proposal can be found [here](https://github.com/gravitational/teleport/pull/9121). + +### Client Redirect +An alternative approach was considered to redirect clients to the corresponding node-proxy. This was ultimately disregarded for a couple of reasons. It increases the time to establish a session for the client as a client would need to dial and authenticate with two proxies. Proxies would need to be individually addressible by the client which makes them an easier targets for DDOS attacks. 
diff --git a/rfd/0070-tctl-sso-configure-command.md b/rfd/0070-tctl-sso-configure-command.md new file mode 100644 index 0000000000000..b44d517eb3e1a --- /dev/null +++ b/rfd/0070-tctl-sso-configure-command.md @@ -0,0 +1,205 @@ +--- +authors: Krzysztof Skrzętnicki +state: draft +--- + +# RFD 70 - `tctl sso configure` command + +## What + +This RFD proposes a new subcommand for the `tctl` tool: `sso configure`. This is a convenience command to help with the +configuration of auth connectors. The command is an alternative to directly editing the auth connector resource files +and provides automated validation and sanity checks. + +Not everything can be checked by the tool, as the overall validity of the SSO setup depends in part on the IdP +configuration which is invisible for us. This is why the output of the command should be tested with `tctl sso test` +command, e.g. by piping the output from one to another: `tctl sso configure ... | tctl sso test`. + +The input is provided exclusively via command flags. In particular, no "guided" or "interactive" experience is provided. +This is a deliberate choice, as the web UI admin interface will be a better hosting environment for such a feature. + +Ultimately we would like to support all providers for which we have +detailed [SSO how-to pages](../docs/pages/enterprise/sso). Initial scope covers only SAML. + +## Why + +We want to simplify the task of configuring a new auth connector. In contrast with free-form text editing of resource, +this command line tool will only output well-formed, validated auth connector. Whenever possible, the tool will +automatically fill in the fields, e.g. by fetching the Proxy public address and calculating the respective webapi +endpoint. + +Additionally, the command provides a foundation for creating similar functionality in web-based UI. + +## UX + +Each connector kind is served by a separate subcommand. Most subcommand flags are not shared. 
+ +```bash +$ tctl sso configure --help + +Create auth connector configuration. + +... + +Commands: + sso configure saml Configure SAML connector, optionally using a preset. Available presets: [okta onelogin ad adfs] + sso configure oidc Configure OIDC auth connector, optionally using a preset. + sso configure github Configure GitHub auth connector. +``` + +### Flags: SAML + +For `SAML` the commonly used flags will be: + +``` +-p, --preset Preset. One of: [okta onelogin ad adfs] +-n, --name Connector name. Required, unless implied from preset. +-e, --entity-descriptor Set the Entity Descriptor. Valid values: file, URL, XML content. Supplies configuration parameters as single XML instead of individual elements. +-a, --attributes-to-roles Sets attribute-to-role mapping in the form 'attr_name,attr_value,role1,role2,...'. Repeatable. + --display Display controls how this connector is displayed. +``` + +The `--attributes-to-roles/-a` flag is particularly important as it is used to provide a mapping between the +IdP-provided attributes and Teleport roles. It can be specified multiple times. + +Alternatives to `--entity-descriptor/-e` flag, allowing to specify these values explicitly: + +``` +--issuer Issuer is the identity provider issuer. +--sso SSO is the URL of the identity provider's SSO service. +--cert Cert is the identity provider certificate PEM. IDP signs responses using this certificate. +--cert-file Like --cert, but read the cert from file. +``` + +Rarely used flags; the tool fills that information automatically: + +``` +--acs AssertionConsumerService is a URL for assertion consumer service on the service provider (Teleport's side). +--audience Audience uniquely identifies our service provider. +--service-provider-issuer ServiceProviderIssuer is the issuer of the service provider (Teleport). 
+``` + +Advanced features: + +- assertion encryption (support varies by IdP) +- externally-provided signing key (by default Teleport will self-issue this) +- overrides for specific IdP providers + +``` +--signing-key-file A file with request signing key. Must be used together with --signing-cert-file. +--signing-cert-file A file with request certificate. Must be used together with --signing-key-file. +--assertion-key-file A file with key used for securing SAML assertions. Must be used together with --assertion-cert-file. +--assertion-cert-file A file with cert used for securing SAML assertions. Must be used together with --assertion-key-file. +--provider Sets the external identity provider type. Examples: ping, adfs. +``` + +Flags for ignoring warnings: + +``` +--ignore-missing-roles Ignore non-existing roles referenced in --attributes-to-roles. +--ignore-missing-certs Ignore the lack of valid certificates from -e and --cert. +``` + +Available presets (`--preset/-p`): + +| Name | Description | Display | +|------------|--------------------------------------|-----------| +| `okta` | Okta | Okta | +| `onelogin` | OneLogin | OneLogin | +| `ad` | Azure Active Directory | Microsoft | +| `adfs` | Active Directory Federation Services | ADFS | + +Examples: + +1) Generate SAML auth connector configuration named `myauth`. + +- members of `admin` group will receive `access`, `editor` and `audit` role. +- members of `developer` group will receive `access` role. +- the IdP metadata will be read from `entity-desc.xml` file. + +``` +$ tctl sso configure saml -n myauth -a groups,admin,access,editor,audit -a group,developer,access -e entity-desc.xml +``` + +2) Generate SAML auth connector configuration using `okta` preset. + +- The choice of preset affects default name, display attribute and may apply IdP-specific tweaks. +- Instead of XML file, a URL was provided to `-e` flag, which will be fetched by Teleport during runtime. 
+ +``` +$ tctl sso configure saml -p okta -a group,dev,access -e https://dev-123456.oktapreview.com/app/ex30h8/sso/saml/metadata +``` + +3) Generate the configuration and immediately test it using `tctl sso test` command. + +``` +$ tctl sso configure saml -p okta -a group,developer,access -e entity-desc.xml | tctl sso test +``` + +Full flag reference: `tctl sso configure saml --help`. + +### Flags: OIDC + +_This section will be filled with expanded implementation scope._ + +### Flags: GitHub + +_This section will be filled with expanded implementation scope._ + +## Security + +The command will never modify existing Teleport configuration. This will be done by the user by further invocation +of `tctl create` command or by other means. + +The command may handle user secrets. The implementation will ensure these are not silently written anywhere (e.g. we +must never save partially filled-in configuration files to temp files). + +## Further work + +### Full provider coverage + +We should extend the set of supported providers to match [SSO how-to pages](../docs/pages/enterprise/sso). + +### Integration with individual IdPs + +We create IdP-specific integration code, which would reduce the expected configuration time for given IdP. The +feasibility of integration is likely to vary greatly depending on specific IdP. + +For example, we may use the Okta CLI tool: + +- [blog](https://developer.okta.com/blog/2020/12/10/introducing-okta-cli) +- [installation](https://cli.okta.com/) +- [source code](https://github.com/okta/okta-cli). + +```bash +$ okta start spring-boot +Registering for a new Okta account, if you would like to use an existing account, use 'okta login' instead. + +First name: Jamie +Last name: Example +Email address: jamie@example.com +Company: Okta Test Company +Creating new Okta Organization, this may take a minute: +OrgUrl: https://dev-123456.okta.com +An email has been sent to you with a verification code. 
+ +Check your email +Verification code: 086420 +New Okta Account created! +Your Okta Domain: https://dev-908973.okta.com +To set your password open this link: +https://dev-908973.okta.com/reset_password/drpFaK66lHuY4d1WbrFP?fromURI=/ + +Configuring a new OIDC Application, almost done: +Created OIDC application, client-id: 0oazahf9k5LDCx32C4x6 + +Change the directory: + cd spring-boot + +Okta configuration written to: src/main/resources/application.properties +Don't EVER commit src/main/resources/application.properties into source control + +Run this application with: + ./mvnw spring-boot:run +``` + diff --git a/rfd/0071-tctl-sso-test-command.md b/rfd/0071-tctl-sso-test-command.md new file mode 100644 index 0000000000000..d0a79b0f7bb94 --- /dev/null +++ b/rfd/0071-tctl-sso-test-command.md @@ -0,0 +1,278 @@ +--- +authors: Krzysztof Skrzętnicki +state: draft +--- + +# RFD 71 - `tctl sso test` command + +## What + +This RFD proposes new subcommand for the `tctl` tool: `sso test`. The purpose of this command is to perform validation +of auth connector in SSO flow prior to creating the connector resource with `tctl create ...` command. + +To accomplish that the definition of auth connector is read from file and attached to the auth request being made. The +Teleport server uses the attached definition to proceed with the flow, instead of using any of the stored auth connector +definitions. + +The login flow proceeds as usual, with some exceptions: + +- Embedded definition of auth connector will be used whenever applicable. +- The client key is not signed at the end of successful flow, so no actual login will be performed. +- During the flow the diagnostic information is captured and stored in the backend, where it can be retrieved by using + the auth request ID as the key. 
+ +The following kinds of auth connectors will be supported: + +- SAML (Enterprise only) +- OIDC (Enterprise only) +- Github + +## Why + +Currently, Teleport offers no mechanism for testing the SSO flows prior to creating the connector, at which point the +connector is immediately accessible for everyone. Having a dedicated testing flow using single console terminal for +initiating the test and seeing the results would improve the usability and speed at which the changes to connectors can +be iterated. Decreased SSO configuration time contributes to improved "time-to-first-value" metric of Teleport. + +The testing capabilities would be especially useful for Teleport Cloud, as currently the administrator is running a risk +of locking themselves out of Teleport cluster. + +## Details + +### UX + +The user initiates the flow by issuing command such as `tctl --proxy= sso test `. The +resource is loaded, and it's kind is determined (SAML/OIDC/GitHub). If the connector kind is supported, the +browser-based SSO flow is initiated. + +Once the flow is finished, either successfully or not, the tool notifies the user of this fact. This is a change from +current behaviour of `tsh login`, where only successful flow are terminated in non-timeout manner. After the flow is +finished, the user is provided with a wealth of diagnostic information. Depending on the scenario a different levels of +verbosity are applied; the highest level is available by using the `--debug` flag. + +The flow is carried out mostly via web browser, + +In the same manner as `tsh login --auth=` opens the browser to perform the login, the user is redirected to +the browser as well. Once the flow is finished in any way, the user is notified of that fact along with any debugging +information that has been passed by the server (e.g. claims, mapped roles, ...). 
+ +### Example runs + +- Successful test: + +``` +$ tctl sso test connector-good.yaml + +If browser window does not open automatically, open it by clicking on the link: + http://127.0.0.1:65228/ef343c31-cc9f-4105-a2c7-c490463f1b96 +SSO flow succeeded! Logged in as: example@gravitational.io +-------------------------------------------------------------------------------- +Authentication details: +- username: example@gravitational.io +- roles: [editor auditor access] +- traits: map[groups:[Everyone okta-admin okta-dev] username:[example@gravitational.io]] + +-------------------------------------------------------------------------------- +[SAML] Attributes to roles: +key: SAML.attributesToRoles +value: + - name: groups + roles: + - editor + - auditor + value: okta-admin + - name: groups + roles: + - access + value: okta-dev + +-------------------------------------------------------------------------------- +[SAML] Attributes statements: +key: SAML.attributeStatements +value: + groups: + - Everyone + - okta-admin + - okta-dev + username: + - example@gravitational.io + +-------------------------------------------------------------------------------- +For more details repeat the command with --debug flag. +``` + +- Mapping to a role which does not exist: + +``` +> tctl sso test connector-bad-mapping.yaml + +If browser window does not open automatically, open it by clicking on the link: + http://127.0.0.1:65239/9da0c11c-824f-473a-8c3f-16c4543e7845 +SSO flow failed! Login error: sso flow failed, error: role XXX-DOES_NOT_EXIST is not found +-------------------------------------------------------------------------------- +Error details: Failed to calculate user attributes. 
+ +Details: [role XXX-DOES_NOT_EXIST is not found] +-------------------------------------------------------------------------------- +[SAML] Attributes to roles: +key: SAML.attributesToRoles +value: + - name: groups + roles: + - access + - XXX-DOES_NOT_EXIST + value: okta-admin + +-------------------------------------------------------------------------------- +[SAML] Attributes statements: +key: SAML.attributeStatements +value: + groups: + - Everyone + - okta-admin + - okta-dev + username: + - example@gravitational.io + +-------------------------------------------------------------------------------- +For more details repeat the command with --debug flag. +``` + +- Bad connector: + +``` +> tctl sso test connector-no-roles.yaml + +ERROR: Unable to load SAML connector. Correct the definition and try again. Details: attributes_to_roles is empty, authorization with connector would never give any roles. +``` + +``` +> tctl sso test connector-malformed-entity-desc.yaml + +ERROR: Unable to load SAML connector. Correct the definition and try again. Details: no SSO set either explicitly or via entity_descriptor spec. +``` + +``` +> tctl sso test connector-missing-entity-desc.yaml +------------------------------------------------------------------------------ + +ERROR: Unable to load SAML connector. Correct the definition and try again. Details: no entity_descriptor set, either provide entity_descriptor or entity_descriptor_url in spec. +``` + +- Bad ACS + +Note: this error is less likely to occur if `tctl sso configure` command is used, as it sets the ACS automatically. + +``` +> tctl sso test connector-bad-acs.yaml + +If browser window does not open automatically, open it by clicking on the link: + http://127.0.0.1:65288/6c5fd054-9055-4f53-a8fe-c5d49f64f77e +SSO flow failed! 
Login error: sso flow failed, error: received response with incorrect or missing attribute statements, please check the identity provider configuration to make sure that mappings for claims/attribute statements are set up correctly. , failed to retrieve SAML assertion info from response: error validating response: Unrecognized Destination value, Expected: https://teleport.example.com:8080/v1/webapi/saml/acs, Actual: https://teleport.example.com:3080/v1/webapi/saml/acs. +-------------------------------------------------------------------------------- +Error details: Failed to retrieve assertion info. This may indicate IdP configuration error. + +Details: [received response with incorrect or missing attribute statements, please check the identity provider configuration to make sure that mappings for claims/attribute statements are set up correctly. , failed to retrieve SAML assertion info from response: error validating response: Unrecognized Destination value, Expected: https://teleport.example.com:8080/v1/webapi/saml/acs, Actual: https://teleport.example.com:3080/v1/webapi/saml/acs.] +-------------------------------------------------------------------------------- +For more details repeat the command with --debug flag. +``` + +- Missing IdP certificate + +``` +> tctl sso test connector-no-cert.yaml + +ERROR: Failed to create auth request. Check the auth connector definition for errors. Error: no identity provider certificate provided, either set certificate as a parameter or via entity_descriptor +``` + +- Incorrect group mapping on IdP side results in no attributes being passed in a claim. + +Note: This is an error on IdP configuration side which we cannot see directly. + +``` +> tctl sso test connector-bad-idp-group-config.yaml + +If browser window does not open automatically, open it by clicking on the link: + http://127.0.0.1:65325/d87df69d-d5d6-427a-bb9e-056ee20b4b90 +SSO flow failed! 
Login error: sso flow failed, error: received response with incorrect or missing attribute statements, please check the identity provider configuration to make sure that mappings for claims/attribute statements are set up correctly. , failed to retrieve SAML assertion info from response: missing AttributeStatement element. +-------------------------------------------------------------------------------- +Error details: Failed to retrieve assertion info. This may indicate IdP configuration error. + +Details: [received response with incorrect or missing attribute statements, please check the identity provider configuration to make sure that mappings for claims/attribute statements are set up correctly. , failed to retrieve SAML assertion info from response: missing AttributeStatement element.] +-------------------------------------------------------------------------------- +For more details repeat the command with --debug flag. +``` + +- The IdP is expecting a different certificate than the one we have in auth connector spec. + + +Note: This is an error on IdP configuration side which we cannot see directly. + +``` +> tctl sso test connector-bad-idp-cert-config.yaml +------------------------------------------------------------------------------ + +If browser window does not open automatically, open it by clicking on the link: +http://127.0.0.1:65333/3c9ac37a-1fb5-4160-8740-e04e4b18ea46 +SSO flow failed! Login error: sso flow failed, error: received response with incorrect or missing attribute statements, please check the identity provider configuration to make sure that mappings for claims/attribute statements are set up correctly. , failed to retrieve SAML assertion info from response: error validating response: unable to decrypt encrypted assertion: cannot decrypt, error retrieving private key: key decryption attempted with mismatched cert, SP cert(11:35:70:c1), assertion cert(f1:e3:25:c6). 
+-------------------------------------------------------------------------------- +Error details: Failed to retrieve assertion info. This may indicate IdP configuration error. + +Details: [received response with incorrect or missing attribute statements, please check the identity provider configuration to make sure that mappings for claims/attribute statements are set up correctly. , failed to retrieve SAML assertion info from response: error validating response: unable to decrypt encrypted assertion: cannot decrypt, error retrieving private key: key decryption attempted with mismatched cert, SP cert(11:35:70:c1), assertion cert(f1:e3:25:c6).] +-------------------------------------------------------------------------------- +For more details repeat the command with --debug flag. +``` + +### Passing details to SSO callback on failed login + +Currently, in case of SSO error, the console client is not informed of failure. Unless interrupted (Ctrl-C/SIGINT) the +request will simply time out after few minutes: + +``` +If browser window does not open automatically, open it by clicking on the link: +http://127.0.0.1:59544/452e0ecc-797d-488e-a9be-ffc23b21fcf8 +DEBU [CLIENT] Timed out waiting for callback after 3m0s. client/weblogin.go:324 +``` + +The user can tell the SSO flow has failed because they should see an error page in the browser. They still have to +manually interrupt the `tsh login` command, which is suboptimal and may confuse users. + +We want to change this behaviour for **ALL** SSO flows (both testing ones and normal ones) so that client callback is +always called. In case of failure the callback will omit the login information, but we may include the error +information. The updated `tsh` clients will correctly receive that error and will be able to display it to the user. The +old `tsh` clients will also terminate the flow, but will lack the ability to display a detailed error message. 
+ +### Implementation details + +There are several conceptual pieces to the implementation: + +1. Extending auth requests with embedded connector details. The auth requests (`SAMLAuthRequest`,`OIDCAuthRequest` + ,`GithubAuthRequest`) will gain two new fields: boolean `TestFlow` flag and optional auth connector spec. +2. Creating the extended auth requests. Currently, the auth requests are created by calling unauthenticated endpoints ( + one per auth kind): `/webapi/{oidc,saml,github}/login/console`. These endpoints will *not* + change. For security reasons, we don't want unauthenticated users to initiate SSO test flows. Instead, the requests + will be created using authenticated API call such as `CreateSAMLAuthRequest`. +3. Making the backend aware of the testing flow. Right now there is no concept of "dry run" login flow. All logins are + deemed "real" and assumed to be made against existing connectors. We want to avoid issuing actual certificates, + adjust the audit log events and make use of embedded connector spec. +4. Adding the API for collecting the SSO diagnostic info, keyed by particular auth request IDs. This is functionality + potentially useful outside of test flows, e.g. by extending the web ui with diagnostic panel for recent SSO logins. + This API will be queried for information after the test flow terminates. +5. Calling the `ClientRedirectURL` in the case of SSO flow failure, but with an error message. + +The implementation of this RFD for different kinds of connectors should be largely independent. As such, the first +iteration will implement this functionality for SAML, while the lessons learned will help shape the implementations for +OIDC and GitHub. + +### Security + +We consider the following potential security problems arising from the implementation of this feature. 
+ +| Problem | Mitigation | +|---------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 1. Unauthorized user initiating the test flow | Creating auth request with TestFlow flag will require elevated permissions, equivalent to those required to create the auth connector directly. | +| 2. Malicious definition of auth connector. | The admin must be able to create real connectors already. The test ones are strictly less powerful. | +| 3. Auth connector secrets leak via logs. | Careful review of logging to ensure we don't log auth connector spec directly or when embedded in auth request. | +| 4. Auth connector secrets leak via access request. | Auth connector secrets are embedded in auth request. Any user capable of accessing this auth request will be able to read those secrets. We already secure the access to auth requests with RBAC. Additionally, the auth requests are short-lived and their ID is random UUID. | +| 5. Other info leak in the logs. | Logging code review to ensure sensitive information is not logged. | +| 6. Logic flaw resulting in test flow issuing real auth. | Code review to ensure the logic is not flawed. | diff --git a/tool/tbot/config/config_destination.go b/tool/tbot/config/config_destination.go index 8e986a824a8a1..30106522289ef 100644 --- a/tool/tbot/config/config_destination.go +++ b/tool/tbot/config/config_destination.go @@ -21,6 +21,30 @@ import ( "github.com/gravitational/trace" ) +// DatabaseConfig is the config for a database access request. +type DatabaseConfig struct { + // Service is the service name of the Teleport database. Generally this is + // the name of the Teleport resource. 
+ Service string `yaml:"service,omitempty"` + + // Database is the name of the database to request access to. + Database string `yaml:"database,omitempty"` + + // Username is the database username to request access as. + Username string `yaml:"username,omitempty"` +} + +func (dc *DatabaseConfig) CheckAndSetDefaults() error { + if dc.Service == "" { + return trace.BadParameter("database `service` field must specify a database service name") + } + + // Note: tsh has special checks for MongoDB and Redis. We don't know the + // protocol at this point so we'll need to defer those checks. + + return nil +} + // DestinationConfig configures a user certificate destination. type DestinationConfig struct { DestinationMixin `yaml:",inline"` @@ -28,6 +52,8 @@ type DestinationConfig struct { Roles []string `yaml:"roles,omitempty"` Kinds []identity.ArtifactKind `yaml:"kinds,omitempty"` Configs []TemplateConfig `yaml:"configs,omitempty"` + + Database *DatabaseConfig `yaml:"database,omitempty"` } // destinationDefaults applies defaults for an output sink's destination. 
Since @@ -42,6 +68,12 @@ func (dc *DestinationConfig) CheckAndSetDefaults() error { return trace.Wrap(err) } + if dc.Database != nil { + if err := dc.Database.CheckAndSetDefaults(); err != nil { + return trace.Wrap(err) + } + } + // Note: empty roles is allowed; interpreted to mean "all" at generation // time diff --git a/tool/tbot/main.go b/tool/tbot/main.go index f428a1c6a42a9..8928eba7c33b6 100644 --- a/tool/tbot/main.go +++ b/tool/tbot/main.go @@ -21,31 +21,20 @@ import ( "context" "crypto/sha256" "encoding/hex" - "fmt" "os" "os/signal" - "strings" "syscall" "time" "github.com/gravitational/teleport" - "github.com/gravitational/teleport/api/client/proto" - "github.com/gravitational/teleport/api/constants" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/auth" - "github.com/gravitational/teleport/lib/auth/authclient" - "github.com/gravitational/teleport/lib/auth/native" - "github.com/gravitational/teleport/lib/client" - "github.com/gravitational/teleport/lib/services" - "github.com/gravitational/teleport/lib/tlsca" "github.com/gravitational/teleport/lib/utils" "github.com/gravitational/teleport/tool/tbot/config" - "github.com/gravitational/teleport/tool/tbot/destination" "github.com/gravitational/teleport/tool/tbot/identity" "github.com/gravitational/trace" "github.com/kr/pretty" "github.com/sirupsen/logrus" - "golang.org/x/crypto/ssh" ) var log = logrus.WithFields(logrus.Fields{ @@ -373,481 +362,3 @@ func watchCARotations(watcher types.Watcher) { } } } - -func getIdentityFromToken(cfg *config.BotConfig) (*identity.Identity, error) { - if cfg.Onboarding == nil { - return nil, trace.BadParameter("onboarding config required via CLI or YAML") - } - if cfg.Onboarding.Token == "" { - return nil, trace.BadParameter("unable to start: no token present") - } - addr, err := utils.ParseAddr(cfg.AuthServer) - if err != nil { - return nil, trace.WrapWithMessage(err, "invalid auth server address %+v", cfg.AuthServer) - } - - tlsPrivateKey, 
sshPublicKey, tlsPublicKey, err := generateKeys() - if err != nil { - return nil, trace.WrapWithMessage(err, "unable to generate new keypairs") - } - - log.Info("Attempting to generate new identity from token") - params := auth.RegisterParams{ - Token: cfg.Onboarding.Token, - ID: auth.IdentityID{ - Role: types.RoleBot, - }, - Servers: []utils.NetAddr{*addr}, - PublicTLSKey: tlsPublicKey, - PublicSSHKey: sshPublicKey, - CAPins: cfg.Onboarding.CAPins, - CAPath: cfg.Onboarding.CAPath, - GetHostCredentials: client.HostCredentials, - JoinMethod: cfg.Onboarding.JoinMethod, - } - certs, err := auth.Register(params) - if err != nil { - return nil, trace.Wrap(err) - } - sha := sha256.Sum256([]byte(params.Token)) - tokenHash := hex.EncodeToString(sha[:]) - ident, err := identity.ReadIdentityFromStore(&identity.LoadIdentityParams{ - PrivateKeyBytes: tlsPrivateKey, - PublicKeyBytes: sshPublicKey, - TokenHashBytes: []byte(tokenHash), - }, certs, identity.BotKinds()...) - return ident, trace.Wrap(err) -} - -func renewIdentityViaAuth( - ctx context.Context, - client auth.ClientI, - currentIdentity *identity.Identity, - cfg *config.BotConfig, -) (*identity.Identity, error) { - // TODO: enforce expiration > renewal period (by what margin?) - - // If using the IAM join method we always go through the initial join flow - // and fetch new nonrenewable certs - var joinMethod types.JoinMethod - if cfg.Onboarding != nil { - joinMethod = cfg.Onboarding.JoinMethod - } - switch joinMethod { - case types.JoinMethodIAM: - ident, err := getIdentityFromToken(cfg) - return ident, trace.Wrap(err) - default: - } - - // Ask the auth server to generate a new set of certs with a new - // expiration date. 
- certs, err := client.GenerateUserCerts(ctx, proto.UserCertsRequest{ - PublicKey: currentIdentity.PublicKeyBytes, - Username: currentIdentity.X509Cert.Subject.CommonName, - Expires: time.Now().Add(cfg.CertificateTTL), - }) - if err != nil { - return nil, trace.Wrap(err) - } - - newIdentity, err := identity.ReadIdentityFromStore( - currentIdentity.Params(), - certs, - identity.BotKinds()..., - ) - if err != nil { - return nil, trace.Wrap(err) - } - - return newIdentity, nil -} - -// fetchDefaultRoles requests the bot's own role from the auth server and -// extracts its full list of allowed roles. -func fetchDefaultRoles(ctx context.Context, roleGetter services.RoleGetter, botRole string) ([]string, error) { - role, err := roleGetter.GetRole(ctx, botRole) - if err != nil { - return nil, trace.Wrap(err) - } - - conditions := role.GetImpersonateConditions(types.Allow) - return conditions.Roles, nil -} - -// describeTLSIdentity writes an informational message about the given identity to -// the log. 
-func describeTLSIdentity(ident *identity.Identity) (string, error) { - cert := ident.X509Cert - if cert == nil { - return "", trace.BadParameter("attempted to describe TLS identity without TLS credentials") - } - - tlsIdent, err := tlsca.FromSubject(cert.Subject, cert.NotAfter) - if err != nil { - return "", trace.Wrap(err, "bot TLS certificate can not be parsed as an identity") - } - - var principals []string - for _, principal := range tlsIdent.Principals { - if !strings.HasPrefix(principal, constants.NoLoginPrefix) { - principals = append(principals, principal) - } - } - - duration := cert.NotAfter.Sub(cert.NotBefore) - return fmt.Sprintf( - "valid: after=%v, before=%v, duration=%s | kind=tls, renewable=%v, disallow-reissue=%v, roles=%v, principals=%v, generation=%v", - cert.NotBefore.Format(time.RFC3339), - cert.NotAfter.Format(time.RFC3339), - duration, - tlsIdent.Renewable, - tlsIdent.DisallowReissue, - tlsIdent.Groups, - principals, - tlsIdent.Generation, - ), nil -} - -// describeSSHIdentity writes an informational message about the given SSH -// identity to the log. 
-func describeSSHIdentity(ident *identity.Identity) (string, error) { - cert := ident.SSHCert - if cert == nil { - return "", trace.BadParameter("attempted to describe SSH identity without SSH credentials") - } - - renewable := false - if _, ok := cert.Extensions[teleport.CertExtensionRenewable]; ok { - renewable = true - } - - disallowReissue := false - if _, ok := cert.Extensions[teleport.CertExtensionDisallowReissue]; ok { - disallowReissue = true - } - - var roles []string - if rolesStr, ok := cert.Extensions[teleport.CertExtensionTeleportRoles]; ok { - if actualRoles, err := services.UnmarshalCertRoles(rolesStr); err == nil { - roles = actualRoles - } - } - - var principals []string - for _, principal := range cert.ValidPrincipals { - if !strings.HasPrefix(principal, constants.NoLoginPrefix) { - principals = append(principals, principal) - } - } - - duration := time.Second * time.Duration(cert.ValidBefore-cert.ValidAfter) - return fmt.Sprintf( - "valid: after=%v, before=%v, duration=%s | kind=ssh, renewable=%v, disallow-reissue=%v, roles=%v, principals=%v", - time.Unix(int64(cert.ValidAfter), 0).Format(time.RFC3339), - time.Unix(int64(cert.ValidBefore), 0).Format(time.RFC3339), - duration, - renewable, - disallowReissue, - roles, - principals, - ), nil -} - -// renew performs a single renewal -func renew( - ctx context.Context, cfg *config.BotConfig, client auth.ClientI, - ident *identity.Identity, botDestination destination.Destination, -) (auth.ClientI, *identity.Identity, error) { - // Make sure we can still write to the bot's destination. 
- if err := identity.VerifyWrite(botDestination); err != nil { - return nil, nil, trace.Wrap(err, "Cannot write to destination %s, aborting.", botDestination) - } - - log.Debug("Attempting to renew bot certificates...") - newIdentity, err := renewIdentityViaAuth(ctx, client, ident, cfg) - if err != nil { - return nil, nil, trace.Wrap(err) - } - - identStr, err := describeTLSIdentity(ident) - if err != nil { - return nil, nil, trace.Wrap(err, "Could not describe bot identity at %s", botDestination) - } - - log.Infof("Successfully renewed bot certificates, %s", identStr) - - // TODO: warn if duration < certTTL? would indicate TTL > server's max renewable cert TTL - // TODO: error if duration < renewalInterval? next renewal attempt will fail - - // Immediately attempt to reconnect using the new identity (still - // haven't persisted the known-good certs). - newClient, err := authenticatedUserClientFromIdentity(ctx, newIdentity, cfg.AuthServer) - if err != nil { - return nil, nil, trace.Wrap(err) - } - - // Attempt a request to make sure our client works. - // TODO: consider a retry/backoff loop. - if _, err := newClient.Ping(ctx); err != nil { - return nil, nil, trace.Wrap(err, "unable to communicate with auth server") - } - - log.Debug("Auth client now using renewed credentials.") - client = newClient - ident = newIdentity - - // Now that we're sure the new creds work, persist them. - if err := identity.SaveIdentity(newIdentity, botDestination, identity.BotKinds()...); err != nil { - return nil, nil, trace.Wrap(err) - } - - // Determine the default role list based on the bot role. 
The role's - // name should match the certificate's Key ID (user and role names - // should all match bot-$name) - botResourceName := ident.X509Cert.Subject.CommonName - defaultRoles, err := fetchDefaultRoles(ctx, client, botResourceName) - if err != nil { - log.WithError(err).Warnf("Unable to determine default roles, no roles will be requested if unspecified") - defaultRoles = []string{} - } - - // Next, generate impersonated certs - expires := ident.X509Cert.NotAfter - for _, dest := range cfg.Destinations { - destImpl, err := dest.GetDestination() - if err != nil { - return nil, nil, trace.Wrap(err) - } - - // Check the ACLs. We can't fix them, but we can warn if they're - // misconfigured. We'll need to precompute a list of keys to check. - // Note: This may only log a warning, depending on configuration. - if err := destImpl.Verify(identity.ListKeys(dest.Kinds...)); err != nil { - return nil, nil, trace.Wrap(err) - } - - // Ensure this destination is also writable. This is a hard fail if - // ACLs are misconfigured, regardless of configuration. - // TODO: consider not making these a hard error? e.g. write other - // destinations even if this one is broken? 
- if err := identity.VerifyWrite(destImpl); err != nil { - return nil, nil, trace.Wrap(err, "Could not write to destination %s, aborting.", destImpl) - } - - var desiredRoles []string - if len(dest.Roles) > 0 { - desiredRoles = dest.Roles - } else { - log.Debugf("Destination specified no roles, defaults will be requested: %v", defaultRoles) - desiredRoles = defaultRoles - } - - impersonatedIdent, err := generateImpersonatedIdentity(ctx, client, ident, expires, desiredRoles, dest.Kinds) - if err != nil { - return nil, nil, trace.Wrap(err, "Failed to generate impersonated certs for %s: %+v", destImpl, err) - } - - var impersonatedIdentStr string - if dest.ContainsKind(identity.KindTLS) { - impersonatedIdentStr, err = describeTLSIdentity(impersonatedIdent) - if err != nil { - return nil, nil, trace.Wrap(err, "could not describe impersonated certs for destination %s", destImpl) - } - } else { - // Note: kinds must contain at least 1 of TLS or SSH - impersonatedIdentStr, err = describeSSHIdentity(impersonatedIdent) - if err != nil { - return nil, nil, trace.Wrap(err, "could not describe impersonated certs for destination %s", destImpl) - } - } - log.Infof("Successfully renewed impersonated certificates for %s, %s", destImpl, impersonatedIdentStr) - - if err := identity.SaveIdentity(impersonatedIdent, destImpl, dest.Kinds...); err != nil { - return nil, nil, trace.Wrap(err, "failed to save impersonated identity to destination %s", destImpl) - } - - for _, templateConfig := range dest.Configs { - template, err := templateConfig.GetConfigTemplate() - if err != nil { - return nil, nil, trace.Wrap(err) - } - - if err := template.Render(ctx, client, impersonatedIdent, dest); err != nil { - log.WithError(err).Warnf("Failed to render config template %+v", templateConfig) - } - } - } - - log.Infof("Persisted new certificates to disk. 
Next renewal in approximately %s", cfg.RenewalInterval) - return newClient, newIdentity, nil -} - -func renewLoop(ctx context.Context, cfg *config.BotConfig, client auth.ClientI, ident *identity.Identity, reloadChan chan struct{}) error { - // TODO: failures here should probably not just end the renewal loop, there - // should be some retry / back-off logic. - - // TODO: what should this interval be? should it be user configurable? - // Also, must be < the validity period. - // TODO: validate that cert is actually renewable. - - log.Infof("Beginning renewal loop: ttl=%s interval=%s", cfg.CertificateTTL, cfg.RenewalInterval) - if cfg.RenewalInterval > cfg.CertificateTTL { - log.Errorf( - "Certificate TTL (%s) is shorter than the renewal interval (%s). The next renewal is likely to fail.", - cfg.CertificateTTL, - cfg.RenewalInterval, - ) - } - - // Determine where the bot should write its internal data (renewable cert - // etc) - botDestination, err := cfg.Storage.GetDestination() - if err != nil { - return trace.Wrap(err) - } - - ticker := time.NewTicker(cfg.RenewalInterval) - defer ticker.Stop() - for { - newClient, newIdentity, err := renew(ctx, cfg, client, ident, botDestination) - if err != nil { - return trace.Wrap(err) - } - - if cfg.Oneshot { - log.Info("Oneshot mode enabled, exiting successfully.") - break - } - - client = newClient - ident = newIdentity - - select { - case <-ctx.Done(): - return nil - case <-ticker.C: - continue - case <-reloadChan: - continue - } - } - - return nil -} - -// authenticatedUserClientFromIdentity creates a new auth client from the given -// identity. Note that depending on the connection address given, this may -// attempt to connect via the proxy and therefore requires both SSH and TLS -// credentials. 
-func authenticatedUserClientFromIdentity(ctx context.Context, id *identity.Identity, authServer string) (auth.ClientI, error) { - if id.SSHCert == nil || id.X509Cert == nil { - return nil, trace.BadParameter("auth client requires a fully formed identity") - } - - tlsConfig, err := id.TLSConfig(nil /* cipherSuites */) - if err != nil { - return nil, trace.Wrap(err) - } - - sshConfig, err := id.SSHClientConfig() - if err != nil { - return nil, trace.Wrap(err) - } - - authAddr, err := utils.ParseAddr(authServer) - if err != nil { - return nil, trace.Wrap(err) - } - - authClientConfig := &authclient.Config{ - TLS: tlsConfig, - SSH: sshConfig, - AuthServers: []utils.NetAddr{*authAddr}, - Log: log, - } - - c, err := authclient.Connect(ctx, authClientConfig) - return c, trace.Wrap(err) -} - -func generateImpersonatedIdentity( - ctx context.Context, - client auth.ClientI, - currentIdentity *identity.Identity, - expires time.Time, - roleRequests []string, - kinds []identity.ArtifactKind, -) (*identity.Identity, error) { - // TODO: enforce expiration > renewal period (by what margin?) - - // Generate a fresh keypair for the impersonated identity. We don't care to - // reuse keys here: impersonated certs might not be as well-protected so - // constantly rotating private keys - privateKey, publicKey, err := native.GenerateKeyPair("") - if err != nil { - return nil, trace.Wrap(err) - } - - // First, ask the auth server to generate a new set of certs with a new - // expiration date. - certs, err := client.GenerateUserCerts(ctx, proto.UserCertsRequest{ - PublicKey: publicKey, - Username: currentIdentity.X509Cert.Subject.CommonName, - Expires: expires, - RoleRequests: roleRequests, - }) - if err != nil { - return nil, trace.Wrap(err) - } - - // The root CA included with the returned user certs will only contain the - // Teleport User CA. We'll also need the host CA for future API calls. 
- localCA, err := client.GetClusterCACert() - if err != nil { - return nil, trace.Wrap(err) - } - - caCerts, err := tlsca.ParseCertificatePEMs(localCA.TLSCA) - if err != nil { - return nil, trace.Wrap(err) - } - - // Append the host CAs from the auth server. - for _, cert := range caCerts { - pemBytes, err := tlsca.MarshalCertificatePEM(cert) - if err != nil { - return nil, trace.Wrap(err) - } - certs.TLSCACerts = append(certs.TLSCACerts, pemBytes) - } - - newIdentity, err := identity.ReadIdentityFromStore(&identity.LoadIdentityParams{ - PrivateKeyBytes: privateKey, - PublicKeyBytes: publicKey, - }, certs, kinds...) - if err != nil { - return nil, trace.Wrap(err) - } - - return newIdentity, nil -} - -func generateKeys() (private, sshpub, tlspub []byte, err error) { - privateKey, publicKey, err := native.GenerateKeyPair("") - if err != nil { - return nil, nil, nil, trace.Wrap(err) - } - - sshPrivateKey, err := ssh.ParseRawPrivateKey(privateKey) - if err != nil { - return nil, nil, nil, trace.Wrap(err) - } - - tlsPublicKey, err := tlsca.MarshalPublicKeyFromPrivateKeyPEM(sshPrivateKey) - if err != nil { - return nil, nil, nil, trace.Wrap(err) - } - - return privateKey, publicKey, tlsPublicKey, nil -} diff --git a/tool/tbot/renew.go b/tool/tbot/renew.go new file mode 100644 index 0000000000000..2933eb48dc251 --- /dev/null +++ b/tool/tbot/renew.go @@ -0,0 +1,642 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "strings" + "time" + + "github.com/gravitational/teleport" + "github.com/gravitational/teleport/api/client/proto" + "github.com/gravitational/teleport/api/constants" + "github.com/gravitational/teleport/api/defaults" + "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/lib/auth" + "github.com/gravitational/teleport/lib/auth/authclient" + "github.com/gravitational/teleport/lib/auth/native" + "github.com/gravitational/teleport/lib/client" + libdefaults "github.com/gravitational/teleport/lib/defaults" + "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/tlsca" + "github.com/gravitational/teleport/lib/utils" + "github.com/gravitational/teleport/tool/tbot/config" + "github.com/gravitational/teleport/tool/tbot/destination" + "github.com/gravitational/teleport/tool/tbot/identity" + "github.com/gravitational/trace" + "golang.org/x/crypto/ssh" +) + +// generateKeys generates TLS and SSH keypairs. +func generateKeys() (private, sshpub, tlspub []byte, err error) { + privateKey, publicKey, err := native.GenerateKeyPair("") + if err != nil { + return nil, nil, nil, trace.Wrap(err) + } + + sshPrivateKey, err := ssh.ParseRawPrivateKey(privateKey) + if err != nil { + return nil, nil, nil, trace.Wrap(err) + } + + tlsPublicKey, err := tlsca.MarshalPublicKeyFromPrivateKeyPEM(sshPrivateKey) + if err != nil { + return nil, nil, nil, trace.Wrap(err) + } + + return privateKey, publicKey, tlsPublicKey, nil +} + +// authenticatedUserClientFromIdentity creates a new auth client from the given +// identity. Note that depending on the connection address given, this may +// attempt to connect via the proxy and therefore requires both SSH and TLS +// credentials. 
+func authenticatedUserClientFromIdentity(ctx context.Context, id *identity.Identity, authServer string) (auth.ClientI, error) { + if id.SSHCert == nil || id.X509Cert == nil { + return nil, trace.BadParameter("auth client requires a fully formed identity") + } + + tlsConfig, err := id.TLSConfig(nil /* cipherSuites */) + if err != nil { + return nil, trace.Wrap(err) + } + + sshConfig, err := id.SSHClientConfig() + if err != nil { + return nil, trace.Wrap(err) + } + + authAddr, err := utils.ParseAddr(authServer) + if err != nil { + return nil, trace.Wrap(err) + } + + authClientConfig := &authclient.Config{ + TLS: tlsConfig, + SSH: sshConfig, + AuthServers: []utils.NetAddr{*authAddr}, + Log: log, + } + + c, err := authclient.Connect(ctx, authClientConfig) + return c, trace.Wrap(err) +} + +// describeTLSIdentity generates an informational message about the given +// TLS identity, appropriate for user-facing log messages. +func describeTLSIdentity(ident *identity.Identity) (string, error) { + cert := ident.X509Cert + if cert == nil { + return "", trace.BadParameter("attempted to describe TLS identity without TLS credentials") + } + + tlsIdent, err := tlsca.FromSubject(cert.Subject, cert.NotAfter) + if err != nil { + return "", trace.Wrap(err, "bot TLS certificate can not be parsed as an identity") + } + + var principals []string + for _, principal := range tlsIdent.Principals { + if !strings.HasPrefix(principal, constants.NoLoginPrefix) { + principals = append(principals, principal) + } + } + + duration := cert.NotAfter.Sub(cert.NotBefore) + return fmt.Sprintf( + "valid: after=%v, before=%v, duration=%s | kind=tls, renewable=%v, disallow-reissue=%v, roles=%v, principals=%v, generation=%v", + cert.NotBefore.Format(time.RFC3339), + cert.NotAfter.Format(time.RFC3339), + duration, + tlsIdent.Renewable, + tlsIdent.DisallowReissue, + tlsIdent.Groups, + principals, + tlsIdent.Generation, + ), nil +} + +// describeSSHIdentity generates an informational message about the given 
+// SSH identity, appropriate for user-facing log messages. +func describeSSHIdentity(ident *identity.Identity) (string, error) { + cert := ident.SSHCert + if cert == nil { + return "", trace.BadParameter("attempted to describe SSH identity without SSH credentials") + } + + renewable := false + if _, ok := cert.Extensions[teleport.CertExtensionRenewable]; ok { + renewable = true + } + + disallowReissue := false + if _, ok := cert.Extensions[teleport.CertExtensionDisallowReissue]; ok { + disallowReissue = true + } + + var roles []string + if rolesStr, ok := cert.Extensions[teleport.CertExtensionTeleportRoles]; ok { + if actualRoles, err := services.UnmarshalCertRoles(rolesStr); err == nil { + roles = actualRoles + } + } + + var principals []string + for _, principal := range cert.ValidPrincipals { + if !strings.HasPrefix(principal, constants.NoLoginPrefix) { + principals = append(principals, principal) + } + } + + duration := time.Second * time.Duration(cert.ValidBefore-cert.ValidAfter) + return fmt.Sprintf( + "valid: after=%v, before=%v, duration=%s | kind=ssh, renewable=%v, disallow-reissue=%v, roles=%v, principals=%v", + time.Unix(int64(cert.ValidAfter), 0).Format(time.RFC3339), + time.Unix(int64(cert.ValidBefore), 0).Format(time.RFC3339), + duration, + renewable, + disallowReissue, + roles, + principals, + ), nil +} + +// identityConfigurator is a function that alters a cert request +type identityConfigurator = func(req *proto.UserCertsRequest) + +// generateIdentity uses an identity to retrieve another possibly impersonated +// identity. The `configurator` function, if not nil, can be used to add +// additional requests to the certificate request, for example to add +// `RouteToDatabase` and similar fields, however in that case it must be +// called with an impersonated identity that already has the relevant +// permissions, much like `tsh (app|db|kube) login` is already used to generate +// an additional set of certs. 
+func generateIdentity( + ctx context.Context, + client auth.ClientI, + currentIdentity *identity.Identity, + expires time.Time, + destCfg *config.DestinationConfig, + defaultRoles []string, + configurator identityConfigurator, +) (*identity.Identity, error) { + // TODO: enforce expiration > renewal period (by what margin?) + + // Generate a fresh keypair for the impersonated identity. We don't care to + // reuse keys here: impersonated certs might not be as well-protected so + // constantly rotating private keys + privateKey, publicKey, err := native.GenerateKeyPair("") + if err != nil { + return nil, trace.Wrap(err) + } + + var roleRequests []string + if len(destCfg.Roles) > 0 { + roleRequests = destCfg.Roles + } else { + log.Debugf("Destination specified no roles, defaults will be requested: %v", defaultRoles) + roleRequests = defaultRoles + } + + req := proto.UserCertsRequest{ + PublicKey: publicKey, + Username: currentIdentity.X509Cert.Subject.CommonName, + Expires: expires, + RoleRequests: roleRequests, + RouteToCluster: currentIdentity.ClusterName, + } + + if configurator != nil { + configurator(&req) + } + + // First, ask the auth server to generate a new set of certs with a new + // expiration date. + certs, err := client.GenerateUserCerts(ctx, req) + if err != nil { + return nil, trace.Wrap(err) + } + + // The root CA included with the returned user certs will only contain the + // Teleport User CA. We'll also need the host CA for future API calls. + localCA, err := client.GetClusterCACert() + if err != nil { + return nil, trace.Wrap(err) + } + + caCerts, err := tlsca.ParseCertificatePEMs(localCA.TLSCA) + if err != nil { + return nil, trace.Wrap(err) + } + + // Append the host CAs from the auth server. 
+ for _, cert := range caCerts { + pemBytes, err := tlsca.MarshalCertificatePEM(cert) + if err != nil { + return nil, trace.Wrap(err) + } + certs.TLSCACerts = append(certs.TLSCACerts, pemBytes) + } + + newIdentity, err := identity.ReadIdentityFromStore(&identity.LoadIdentityParams{ + PrivateKeyBytes: privateKey, + PublicKeyBytes: publicKey, + }, certs, destCfg.Kinds...) + if err != nil { + return nil, trace.Wrap(err) + } + + return newIdentity, nil +} + +func getDatabase(ctx context.Context, client auth.ClientI, name string) (types.Database, error) { + res, err := client.ListResources(ctx, proto.ListResourcesRequest{ + Namespace: defaults.Namespace, + ResourceType: types.KindDatabaseServer, + PredicateExpression: fmt.Sprintf(`name == "%s"`, name), + Limit: int32(defaults.DefaultChunkSize), + }) + if err != nil { + return nil, trace.Wrap(err) + } + + servers, err := types.ResourcesWithLabels(res.Resources).AsDatabaseServers() + if err != nil { + return nil, trace.Wrap(err) + } + + var databases []types.Database + for _, server := range servers { + databases = append(databases, server.GetDatabase()) + } + + databases = types.DeduplicateDatabases(databases) + if len(databases) == 0 { + return nil, trace.NotFound("database %q not found", name) + } + + return databases[0], nil +} + +func getRouteToDatabase(ctx context.Context, client auth.ClientI, dbCfg *config.DatabaseConfig) (proto.RouteToDatabase, error) { + if dbCfg.Service == "" { + return proto.RouteToDatabase{}, nil + } + + db, err := getDatabase(ctx, client, dbCfg.Service) + if err != nil { + return proto.RouteToDatabase{}, trace.Wrap(err) + } + + username := dbCfg.Username + if db.GetProtocol() == libdefaults.ProtocolMongoDB && username == "" { + // This isn't strictly a runtime error so killing the process seems + // wrong. We'll just loudly warn about it. 
+ log.Errorf("Database `username` field for %q is unset but is required for MongoDB databases.", dbCfg.Service) + } else if db.GetProtocol() == libdefaults.ProtocolRedis && username == "" { + // Per tsh's lead, fall back to the default username. + username = libdefaults.DefaultRedisUsername + } + + return proto.RouteToDatabase{ + ServiceName: dbCfg.Service, + Protocol: db.GetProtocol(), + Database: dbCfg.Database, + Username: username, + }, nil +} + +func generateImpersonatedIdentity( + ctx context.Context, + client auth.ClientI, + authServer string, + currentIdentity *identity.Identity, + expires time.Time, + destCfg *config.DestinationConfig, + defaultRoles []string, +) (*identity.Identity, error) { + ident, err := generateIdentity(ctx, client, currentIdentity, expires, destCfg, defaultRoles, nil) + if err != nil { + return nil, trace.Wrap(err) + } + + // Now that we have an initial impersonated identity, we can use it to + // request any app/db/etc certs + if destCfg.Database != nil { + impClient, err := authenticatedUserClientFromIdentity(ctx, ident, authServer) + if err != nil { + return nil, trace.Wrap(err) + } + + route, err := getRouteToDatabase(ctx, impClient, destCfg.Database) + if err != nil { + return nil, trace.Wrap(err) + } + + // The impersonated identity is not allowed to reissue certificates, + // so we'll request the database access identity using the main bot + // identity (having gathered the necessary info for RouteToDatabase + // using the correct impersonated ident.) 
+ newIdent, err := generateIdentity(ctx, client, ident, expires, destCfg, defaultRoles, func(req *proto.UserCertsRequest) { + req.RouteToDatabase = route + }) + + log.Infof("Generated identity for database %q", destCfg.Database.Service) + + return newIdent, trace.Wrap(err) + } + + return ident, nil +} + +func getIdentityFromToken(cfg *config.BotConfig) (*identity.Identity, error) { + if cfg.Onboarding == nil { + return nil, trace.BadParameter("onboarding config required via CLI or YAML") + } + if cfg.Onboarding.Token == "" { + return nil, trace.BadParameter("unable to start: no token present") + } + addr, err := utils.ParseAddr(cfg.AuthServer) + if err != nil { + return nil, trace.WrapWithMessage(err, "invalid auth server address %+v", cfg.AuthServer) + } + + tlsPrivateKey, sshPublicKey, tlsPublicKey, err := generateKeys() + if err != nil { + return nil, trace.WrapWithMessage(err, "unable to generate new keypairs") + } + + log.Info("Attempting to generate new identity from token") + params := auth.RegisterParams{ + Token: cfg.Onboarding.Token, + ID: auth.IdentityID{ + Role: types.RoleBot, + }, + Servers: []utils.NetAddr{*addr}, + PublicTLSKey: tlsPublicKey, + PublicSSHKey: sshPublicKey, + CAPins: cfg.Onboarding.CAPins, + CAPath: cfg.Onboarding.CAPath, + GetHostCredentials: client.HostCredentials, + JoinMethod: cfg.Onboarding.JoinMethod, + } + certs, err := auth.Register(params) + if err != nil { + return nil, trace.Wrap(err) + } + sha := sha256.Sum256([]byte(params.Token)) + tokenHash := hex.EncodeToString(sha[:]) + ident, err := identity.ReadIdentityFromStore(&identity.LoadIdentityParams{ + PrivateKeyBytes: tlsPrivateKey, + PublicKeyBytes: sshPublicKey, + TokenHashBytes: []byte(tokenHash), + }, certs, identity.BotKinds()...) 
+ return ident, trace.Wrap(err)
+}
+
+func renewIdentityViaAuth(
+ ctx context.Context,
+ client auth.ClientI,
+ currentIdentity *identity.Identity,
+ cfg *config.BotConfig,
+) (*identity.Identity, error) {
+ // TODO: enforce expiration > renewal period (by what margin?)
+
+ // If using the IAM join method we always go through the initial join flow
+ // and fetch new nonrenewable certs
+ var joinMethod types.JoinMethod
+ if cfg.Onboarding != nil {
+ joinMethod = cfg.Onboarding.JoinMethod
+ }
+ switch joinMethod {
+ case types.JoinMethodIAM:
+ ident, err := getIdentityFromToken(cfg)
+ return ident, trace.Wrap(err)
+ default:
+ }
+
+ // Ask the auth server to generate a new set of certs with a new
+ // expiration date.
+ certs, err := client.GenerateUserCerts(ctx, proto.UserCertsRequest{
+ PublicKey: currentIdentity.PublicKeyBytes,
+ Username: currentIdentity.X509Cert.Subject.CommonName,
+ Expires: time.Now().Add(cfg.CertificateTTL),
+ })
+ if err != nil {
+ return nil, trace.Wrap(err)
+ }
+
+ newIdentity, err := identity.ReadIdentityFromStore(
+ currentIdentity.Params(),
+ certs,
+ identity.BotKinds()...,
+ )
+ if err != nil {
+ return nil, trace.Wrap(err)
+ }
+
+ return newIdentity, nil
+}
+
+// fetchDefaultRoles requests the bot's own role from the auth server and
+// extracts its full list of allowed roles.
+func fetchDefaultRoles(ctx context.Context, roleGetter services.RoleGetter, botRole string) ([]string, error) {
+ role, err := roleGetter.GetRole(ctx, botRole)
+ if err != nil {
+ return nil, trace.Wrap(err)
+ }
+
+ conditions := role.GetImpersonateConditions(types.Allow)
+ return conditions.Roles, nil
+}
+
+// renew performs a single renewal
+func renew(
+ ctx context.Context, cfg *config.BotConfig, client auth.ClientI,
+ ident *identity.Identity, botDestination destination.Destination,
+) (auth.ClientI, *identity.Identity, error) {
+ // Make sure we can still write to the bot's destination. 
+ if err := identity.VerifyWrite(botDestination); err != nil { + return nil, nil, trace.Wrap(err, "Cannot write to destination %s, aborting.", botDestination) + } + + log.Debug("Attempting to renew bot certificates...") + newIdentity, err := renewIdentityViaAuth(ctx, client, ident, cfg) + if err != nil { + return nil, nil, trace.Wrap(err) + } + + identStr, err := describeTLSIdentity(ident) + if err != nil { + return nil, nil, trace.Wrap(err, "Could not describe bot identity at %s", botDestination) + } + + log.Infof("Successfully renewed bot certificates, %s", identStr) + + // TODO: warn if duration < certTTL? would indicate TTL > server's max renewable cert TTL + // TODO: error if duration < renewalInterval? next renewal attempt will fail + + // Immediately attempt to reconnect using the new identity (still + // haven't persisted the known-good certs). + newClient, err := authenticatedUserClientFromIdentity(ctx, newIdentity, cfg.AuthServer) + if err != nil { + return nil, nil, trace.Wrap(err) + } + + // Attempt a request to make sure our client works. + // TODO: consider a retry/backoff loop. + if _, err := newClient.Ping(ctx); err != nil { + return nil, nil, trace.Wrap(err, "unable to communicate with auth server") + } + + log.Debug("Auth client now using renewed credentials.") + client = newClient + ident = newIdentity + + // Now that we're sure the new creds work, persist them. + if err := identity.SaveIdentity(newIdentity, botDestination, identity.BotKinds()...); err != nil { + return nil, nil, trace.Wrap(err) + } + + // Determine the default role list based on the bot role. 
The role's + // name should match the certificate's Key ID (user and role names + // should all match bot-$name) + botResourceName := ident.X509Cert.Subject.CommonName + defaultRoles, err := fetchDefaultRoles(ctx, client, botResourceName) + if err != nil { + log.WithError(err).Warnf("Unable to determine default roles, no roles will be requested if unspecified") + defaultRoles = []string{} + } + + // Next, generate impersonated certs + expires := ident.X509Cert.NotAfter + for _, dest := range cfg.Destinations { + destImpl, err := dest.GetDestination() + if err != nil { + return nil, nil, trace.Wrap(err) + } + + // Check the ACLs. We can't fix them, but we can warn if they're + // misconfigured. We'll need to precompute a list of keys to check. + // Note: This may only log a warning, depending on configuration. + if err := destImpl.Verify(identity.ListKeys(dest.Kinds...)); err != nil { + return nil, nil, trace.Wrap(err) + } + + // Ensure this destination is also writable. This is a hard fail if + // ACLs are misconfigured, regardless of configuration. + // TODO: consider not making these a hard error? e.g. write other + // destinations even if this one is broken? 
+ if err := identity.VerifyWrite(destImpl); err != nil { + return nil, nil, trace.Wrap(err, "Could not write to destination %s, aborting.", destImpl) + } + + impersonatedIdent, err := generateImpersonatedIdentity(ctx, client, cfg.AuthServer, ident, expires, dest, defaultRoles) + if err != nil { + return nil, nil, trace.Wrap(err, "Failed to generate impersonated certs for %s: %+v", destImpl, err) + } + + var impersonatedIdentStr string + if dest.ContainsKind(identity.KindTLS) { + impersonatedIdentStr, err = describeTLSIdentity(impersonatedIdent) + if err != nil { + return nil, nil, trace.Wrap(err, "could not describe impersonated certs for destination %s", destImpl) + } + } else { + // Note: kinds must contain at least 1 of TLS or SSH + impersonatedIdentStr, err = describeSSHIdentity(impersonatedIdent) + if err != nil { + return nil, nil, trace.Wrap(err, "could not describe impersonated certs for destination %s", destImpl) + } + } + log.Infof("Successfully renewed impersonated certificates for %s, %s", destImpl, impersonatedIdentStr) + + if err := identity.SaveIdentity(impersonatedIdent, destImpl, dest.Kinds...); err != nil { + return nil, nil, trace.Wrap(err, "failed to save impersonated identity to destination %s", destImpl) + } + + for _, templateConfig := range dest.Configs { + template, err := templateConfig.GetConfigTemplate() + if err != nil { + return nil, nil, trace.Wrap(err) + } + + if err := template.Render(ctx, client, impersonatedIdent, dest); err != nil { + log.WithError(err).Warnf("Failed to render config template %+v", templateConfig) + } + } + } + + log.Infof("Persisted new certificates to disk. Next renewal in approximately %s", cfg.RenewalInterval) + return newClient, newIdentity, nil +} + +func renewLoop(ctx context.Context, cfg *config.BotConfig, client auth.ClientI, ident *identity.Identity, reloadChan chan struct{}) error { + // TODO: failures here should probably not just end the renewal loop, there + // should be some retry / back-off logic. 
+ + // TODO: what should this interval be? should it be user configurable? + // Also, must be < the validity period. + // TODO: validate that cert is actually renewable. + + log.Infof("Beginning renewal loop: ttl=%s interval=%s", cfg.CertificateTTL, cfg.RenewalInterval) + if cfg.RenewalInterval > cfg.CertificateTTL { + log.Errorf( + "Certificate TTL (%s) is shorter than the renewal interval (%s). The next renewal is likely to fail.", + cfg.CertificateTTL, + cfg.RenewalInterval, + ) + } + + // Determine where the bot should write its internal data (renewable cert + // etc) + botDestination, err := cfg.Storage.GetDestination() + if err != nil { + return trace.Wrap(err) + } + + ticker := time.NewTicker(cfg.RenewalInterval) + defer ticker.Stop() + for { + newClient, newIdentity, err := renew(ctx, cfg, client, ident, botDestination) + if err != nil { + return trace.Wrap(err) + } + + if cfg.Oneshot { + log.Info("Oneshot mode enabled, exiting successfully.") + break + } + + client = newClient + ident = newIdentity + + select { + case <-ctx.Done(): + return nil + case <-ticker.C: + continue + case <-reloadChan: + continue + } + } + + return nil +} diff --git a/tool/tbot/renew_test.go b/tool/tbot/renew_test.go new file mode 100644 index 0000000000000..3965ce3853d50 --- /dev/null +++ b/tool/tbot/renew_test.go @@ -0,0 +1,151 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "context" + "testing" + "time" + + "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/api/utils" + libconfig "github.com/gravitational/teleport/lib/config" + "github.com/gravitational/teleport/lib/tlsca" + "github.com/gravitational/teleport/tool/tbot/config" + "github.com/gravitational/teleport/tool/tbot/identity" + "github.com/gravitational/teleport/tool/tbot/testhelpers" + "github.com/gravitational/trace" + "github.com/stretchr/testify/require" +) + +// TestOnboardViaToken ensures a bot can join using token auth. +func TestOnboardViaToken(t *testing.T) { + // Make a new auth server. + fc := testhelpers.DefaultConfig(t) + _ = testhelpers.MakeAndRunTestAuthServer(t, fc) + rootClient := testhelpers.MakeDefaultAuthClient(t, fc) + + // Make and join a new bot instance. + botParams := testhelpers.MakeBot(t, rootClient, "test") + botConfig := testhelpers.MakeMemoryBotConfig(t, fc, botParams) + ident, err := getIdentityFromToken(botConfig) + require.NoError(t, err) + + tlsIdent, err := tlsca.FromSubject(ident.X509Cert.Subject, ident.X509Cert.NotAfter) + require.NoError(t, err) + + require.True(t, tlsIdent.Renewable) + require.False(t, tlsIdent.DisallowReissue) + require.Equal(t, uint64(1), tlsIdent.Generation) + require.ElementsMatch(t, []string{botParams.RoleName}, tlsIdent.Groups) + + // Make sure the bot identity actually works. + botClient := testhelpers.MakeBotAuthClient(t, fc, ident) + _, err = botClient.GetClusterName() + require.NoError(t, err) +} + +func TestDatabaseRequest(t *testing.T) { + // Make a new auth server. + fc := testhelpers.DefaultConfig(t) + fc.Databases.Databases = []*libconfig.Database{ + { + Name: "foo", + Protocol: "mysql", + URI: "foo.example.com:1234", + StaticLabels: map[string]string{ + "env": "dev", + }, + }, + } + _ = testhelpers.MakeAndRunTestAuthServer(t, fc) + rootClient := testhelpers.MakeDefaultAuthClient(t, fc) + + // Wait for the database to become available. 
Sometimes this takes a bit + // of time in CI. + for i := 0; i < 10; i++ { + _, err := getDatabase(context.Background(), rootClient, "foo") + if err == nil { + break + } else if !trace.IsNotFound(err) { + require.NoError(t, err) + } + + if i >= 9 { + t.Fatalf("database never became available") + } + + t.Logf("Database not yet available, waiting...") + time.Sleep(time.Second * 1) + } + + // Note: we don't actually need a role granting us database access to + // request it. Actual access is validated via RBAC at connection time. + // We do need an actual database and permission to list them, however. + + // Create a role to grant access to the database. + const roleName = "db-role" + role, err := types.NewRole(roleName, types.RoleSpecV5{ + Allow: types.RoleConditions{ + DatabaseLabels: types.Labels{ + "*": utils.Strings{"*"}, + }, + DatabaseNames: []string{"bar"}, + DatabaseUsers: []string{"baz"}, + Rules: []types.Rule{ + types.NewRule("db_server", []string{"read", "list"}), + }, + }, + }) + require.NoError(t, err) + + require.NoError(t, rootClient.UpsertRole(context.Background(), role)) + + // Make and join a new bot instance. + botParams := testhelpers.MakeBot(t, rootClient, "test", roleName) + botConfig := testhelpers.MakeMemoryBotConfig(t, fc, botParams) + + dest := botConfig.Destinations[0] + dest.Kinds = []identity.ArtifactKind{identity.KindSSH, identity.KindTLS} + dest.Database = &config.DatabaseConfig{ + Service: "foo", + Database: "bar", + Username: "baz", + } + + // Onboard the bot. 
+ ident, err := getIdentityFromToken(botConfig) + require.NoError(t, err) + + botClient := testhelpers.MakeBotAuthClient(t, fc, ident) + + impersonatedIdent, err := generateImpersonatedIdentity( + context.Background(), botClient, botConfig.AuthServer, ident, + ident.X509Cert.NotAfter, dest, []string{roleName}, + ) + require.NoError(t, err) + + tlsIdent, err := tlsca.FromSubject(impersonatedIdent.X509Cert.Subject, impersonatedIdent.X509Cert.NotAfter) + require.NoError(t, err) + + route := tlsIdent.RouteToDatabase + + require.Equal(t, "foo", route.ServiceName) + require.Equal(t, "bar", route.Database) + require.Equal(t, "baz", route.Username) + require.Equal(t, "mysql", route.Protocol) +} diff --git a/tool/tbot/testhelpers/srv.go b/tool/tbot/testhelpers/srv.go new file mode 100644 index 0000000000000..4bddd31065571 --- /dev/null +++ b/tool/tbot/testhelpers/srv.go @@ -0,0 +1,196 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testhelpers + +import ( + "context" + "net" + "path/filepath" + "testing" + "time" + + "github.com/gravitational/teleport" + "github.com/gravitational/teleport/api/client/proto" + "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/lib/auth" + "github.com/gravitational/teleport/lib/auth/authclient" + "github.com/gravitational/teleport/lib/config" + "github.com/gravitational/teleport/lib/service" + "github.com/gravitational/teleport/lib/utils" + botconfig "github.com/gravitational/teleport/tool/tbot/config" + "github.com/gravitational/teleport/tool/tbot/identity" + "github.com/stretchr/testify/require" +) + +func DefaultConfig(t *testing.T) *config.FileConfig { + return &config.FileConfig{ + Global: config.Global{ + DataDir: t.TempDir(), + }, + Databases: config.Databases{ + Service: config.Service{ + EnabledFlag: "true", + }, + }, + Proxy: config.Proxy{ + Service: config.Service{ + EnabledFlag: "true", + }, + WebAddr: mustGetFreeLocalListenerAddr(t), + TunAddr: mustGetFreeLocalListenerAddr(t), + }, + Auth: config.Auth{ + Service: config.Service{ + EnabledFlag: "true", + ListenAddress: mustGetFreeLocalListenerAddr(t), + }, + }, + } +} + +func mustGetFreeLocalListenerAddr(t *testing.T) string { + l, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer l.Close() + return l.Addr().String() +} + +// MakeAndRunTestAuthServer creates an auth server useful for testing purposes. 
+func MakeAndRunTestAuthServer(t *testing.T, fc *config.FileConfig) (auth *service.TeleportProcess) { + t.Helper() + + var err error + cfg := service.MakeDefaultConfig() + require.NoError(t, config.ApplyFileConfig(fc, cfg)) + + cfg.CachePolicy.Enabled = false + cfg.Proxy.DisableWebInterface = true + auth, err = service.NewTeleport(cfg) + require.NoError(t, err) + require.NoError(t, auth.Start()) + + t.Cleanup(func() { + auth.Close() + }) + + eventCh := make(chan service.Event, 1) + auth.WaitForEvent(auth.ExitContext(), service.AuthTLSReady, eventCh) + select { + case <-eventCh: + case <-time.After(30 * time.Second): + // in reality, the auth server should start *much* sooner than this. we use a very large + // timeout here because this isn't the kind of problem that this test is meant to catch. + t.Fatal("auth server didn't start after 30s") + } + return auth +} + +// MakeBotAuthClient creates a new auth client using a Bot identity. +func MakeBotAuthClient(t *testing.T, fc *config.FileConfig, ident *identity.Identity) auth.ClientI { + t.Helper() + + cfg := service.MakeDefaultConfig() + err := config.ApplyFileConfig(fc, cfg) + require.NoError(t, err) + + authConfig := new(authclient.Config) + authConfig.TLS, err = ident.TLSConfig(cfg.CipherSuites) + require.NoError(t, err) + + authConfig.AuthServers = cfg.AuthServers + authConfig.Log = cfg.Log + + client, err := authclient.Connect(context.Background(), authConfig) + require.NoError(t, err) + + return client +} + +// MakeDefaultAuthClient reimplements the bare minimum needed to create a +// default root-level auth client for a Teleport server started by +// MakeAndRunTestAuthServer. 
+func MakeDefaultAuthClient(t *testing.T, fc *config.FileConfig) auth.ClientI { + t.Helper() + + cfg := service.MakeDefaultConfig() + err := config.ApplyFileConfig(fc, cfg) + require.NoError(t, err) + + cfg.HostUUID, err = utils.ReadHostUUID(cfg.DataDir) + require.NoError(t, err) + + identity, err := auth.ReadLocalIdentity(filepath.Join(cfg.DataDir, teleport.ComponentProcess), auth.IdentityID{Role: types.RoleAdmin, HostUUID: cfg.HostUUID}) + require.NoError(t, err) + + authConfig := new(authclient.Config) + authConfig.TLS, err = identity.TLSConfig(cfg.CipherSuites) + require.NoError(t, err) + + authConfig.AuthServers = cfg.AuthServers + authConfig.Log = cfg.Log + + client, err := authclient.Connect(context.Background(), authConfig) + require.NoError(t, err) + + return client +} + +// MakeBot creates a server-side bot and returns joining parameters. +func MakeBot(t *testing.T, client auth.ClientI, name string, roles ...string) *proto.CreateBotResponse { + t.Helper() + + bot, err := client.CreateBot(context.Background(), &proto.CreateBotRequest{ + Name: name, + Roles: roles, + }) + + require.NoError(t, err) + return bot +} + +// MakeMemoryBotConfig creates a usable bot config from joining parameters. It +// only writes artifacts to memory and can be further modified if desired. 
+func MakeMemoryBotConfig(t *testing.T, fc *config.FileConfig, botParams *proto.CreateBotResponse) *botconfig.BotConfig { + t.Helper() + + authCfg := service.MakeDefaultConfig() + err := config.ApplyFileConfig(fc, authCfg) + require.NoError(t, err) + + cfg := &botconfig.BotConfig{ + AuthServer: authCfg.AuthServers[0].String(), + Onboarding: &botconfig.OnboardingConfig{ + JoinMethod: botParams.JoinMethod, + Token: botParams.TokenID, + }, + Storage: &botconfig.StorageConfig{ + DestinationMixin: botconfig.DestinationMixin{ + Memory: &botconfig.DestinationMemory{}, + }, + }, + Destinations: []*botconfig.DestinationConfig{ + { + DestinationMixin: botconfig.DestinationMixin{ + Memory: &botconfig.DestinationMemory{}, + }, + }, + }, + } + require.NoError(t, cfg.CheckAndSetDefaults()) + + return cfg +} diff --git a/tool/tctl/common/resource_command.go b/tool/tctl/common/resource_command.go index 87a55e6af44cd..527314f03d8bd 100644 --- a/tool/tctl/common/resource_command.go +++ b/tool/tctl/common/resource_command.go @@ -630,6 +630,11 @@ func (rc *ResourceCommand) Delete(client auth.ClientI) (err error) { return trace.Wrap(err) } fmt.Printf("role %q has been deleted\n", rc.ref.Name) + case types.KindToken: + if err = client.DeleteToken(ctx, rc.ref.Name); err != nil { + return trace.Wrap(err) + } + fmt.Printf("token %q has been deleted\n", rc.ref.Name) case types.KindSAMLConnector: if err = client.DeleteSAMLConnector(ctx, rc.ref.Name); err != nil { return trace.Wrap(err) diff --git a/tool/tctl/common/token_command.go b/tool/tctl/common/token_command.go index 9d48a924c6ba8..752f047a7e568 100644 --- a/tool/tctl/common/token_command.go +++ b/tool/tctl/common/token_command.go @@ -91,7 +91,7 @@ func (c *TokenCommand) Initialize(app *kingpin.Application, config *service.Conf // tctl tokens add ..." 
c.tokenAdd = tokens.Command("add", "Create a invitation token") - c.tokenAdd.Flag("type", "Type of token to add").Required().StringVar(&c.tokenType) + c.tokenAdd.Flag("type", "Type(s) of token to add, e.g. --type=node,app,db").Required().StringVar(&c.tokenType) c.tokenAdd.Flag("value", "Value of token to add").StringVar(&c.value) c.tokenAdd.Flag("labels", "Set token labels, e.g. env=prod,region=us-west").StringVar(&c.labels) c.tokenAdd.Flag("ttl", fmt.Sprintf("Set expiration time for token, default is %v hour", diff --git a/tool/tctl/common/user_command.go b/tool/tctl/common/user_command.go index c6ad02bf7a08f..024022da6aa50 100644 --- a/tool/tctl/common/user_command.go +++ b/tool/tctl/common/user_command.go @@ -41,9 +41,12 @@ type UserCommand struct { login string allowedLogins []string allowedWindowsLogins []string + allowedKubeUsers []string + allowedKubeGroups []string + allowedDatabaseUsers []string + allowedDatabaseNames []string + allowedAWSRoleARNs []string createRoles []string - kubeUsers string - kubeGroups string ttl time.Duration @@ -72,6 +75,12 @@ func (u *UserCommand) Initialize(app *kingpin.Application, config *service.Confi u.userAdd.Flag("logins", "List of allowed SSH logins for the new user").StringsVar(&u.allowedLogins) u.userAdd.Flag("windows-logins", "List of allowed Windows logins for the new user").StringsVar(&u.allowedWindowsLogins) + u.userAdd.Flag("kubernetes-users", "List of allowed Kubernetes users for the new user").StringsVar(&u.allowedKubeUsers) + u.userAdd.Flag("kubernetes-groups", "List of allowed Kubernetes groups for the new user").StringsVar(&u.allowedKubeGroups) + u.userAdd.Flag("db-users", "List of allowed database users for the new user").StringsVar(&u.allowedDatabaseUsers) + u.userAdd.Flag("db-names", "List of allowed database names for the new user").StringsVar(&u.allowedDatabaseNames) + u.userAdd.Flag("aws-role-arns", "List of allowed AWS role ARNs for the new user").StringsVar(&u.allowedAWSRoleARNs) + 
u.userAdd.Flag("roles", "List of roles for the new user to assume").Required().StringsVar(&u.createRoles) u.userAdd.Flag("ttl", fmt.Sprintf("Set expiration time for token, default is %v, maximum is %v", @@ -200,8 +209,11 @@ func (u *UserCommand) Add(client auth.ClientI) error { traits := map[string][]string{ teleport.TraitLogins: u.allowedLogins, teleport.TraitWindowsLogins: u.allowedWindowsLogins, - teleport.TraitKubeUsers: flattenSlice([]string{u.kubeUsers}), - teleport.TraitKubeGroups: flattenSlice([]string{u.kubeGroups}), + teleport.TraitKubeUsers: flattenSlice(u.allowedKubeUsers), + teleport.TraitKubeGroups: flattenSlice(u.allowedKubeGroups), + teleport.TraitDBUsers: flattenSlice(u.allowedDatabaseUsers), + teleport.TraitDBNames: flattenSlice(u.allowedDatabaseNames), + teleport.TraitAWSRoleARNs: flattenSlice(u.allowedAWSRoleARNs), } user, err := types.NewUser(u.login) diff --git a/tool/tsh/kube.go b/tool/tsh/kube.go index e6b9d3219f503..25f249c523edc 100644 --- a/tool/tsh/kube.go +++ b/tool/tsh/kube.go @@ -808,7 +808,7 @@ func fetchKubeClusters(ctx context.Context, tc *client.TeleportClient) (teleport Labels: tc.Labels, }) if err != nil { - // ListResources for kube service not availalbe, provide fallback. + // ListResources for kube service not available, provide fallback. // Fallback does not support filters, so if users // provide them, it does nothing. 
// diff --git a/tool/tsh/tsh.go b/tool/tsh/tsh.go index 78677af1016c3..42cef02b934f4 100644 --- a/tool/tsh/tsh.go +++ b/tool/tsh/tsh.go @@ -36,6 +36,7 @@ import ( "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" + "github.com/ghodss/yaml" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/constants" apidefaults "github.com/gravitational/teleport/api/defaults" @@ -65,8 +66,6 @@ import ( "github.com/gravitational/kingpin" "github.com/gravitational/trace" - "github.com/ghodss/yaml" - gops "github.com/google/gops/agent" "github.com/jonboulle/clockwork" "github.com/sirupsen/logrus" ) @@ -177,11 +176,6 @@ type CLIConf struct { BenchValueScale float64 // Context is a context to control execution Context context.Context - // Gops starts gops agent on a specified address - // if not specified, gops won't start - Gops bool - // GopsAddr specifies to gops addr to listen on - GopsAddr string // IdentityFileIn is an argument to -i flag (path to the private key+cert file) IdentityFileIn string // Compatibility flags, --compat, specifies OpenSSH compatibility flags. @@ -425,8 +419,6 @@ func Run(args []string, opts ...cliOption) error { app.Flag("auth", "Specify the name of authentication connector to use.").Envar(authEnvVar).StringVar(&cf.AuthConnector) app.Flag("namespace", "Namespace of the cluster").Default(apidefaults.Namespace).Hidden().StringVar(&cf.Namespace) - app.Flag("gops", "Start gops endpoint on a given address").Hidden().BoolVar(&cf.Gops) - app.Flag("gops-addr", "Specify gops addr to listen on").Hidden().StringVar(&cf.GopsAddr) app.Flag("skip-version-check", "Skip version checking between server and client.").BoolVar(&cf.SkipVersionCheck) app.Flag("debug", "Verbose logging to stdout").Short('d').BoolVar(&cf.Debug) app.Flag("add-keys-to-agent", fmt.Sprintf("Controls how keys are handled. 
Valid values are %v.", client.AllAddKeysOptions)).Short('k').Envar(addKeysToAgentEnvVar).Default(client.AddKeysToAgentAuto).StringVar(&cf.AddKeysToAgent) @@ -722,14 +714,6 @@ func Run(args []string, opts ...cliOption) error { }() cf.Context = ctx - if cf.Gops { - log.Debugf("Starting gops agent.") - err = gops.Listen(gops.Options{Addr: cf.GopsAddr}) - if err != nil { - log.Warningf("Failed to start gops agent %v.", err) - } - } - cf.executablePath, err = os.Executable() if err != nil { return trace.Wrap(err) diff --git a/webassets b/webassets index 2a66dd8eb4714..b21c30aeeb946 160000 --- a/webassets +++ b/webassets @@ -1 +1 @@ -Subproject commit 2a66dd8eb47143a0a0d57d2dab67e9981a7b83b4 +Subproject commit b21c30aeeb9463164a7f1332c5f4987d48b5337e