From fcef8adfb06e41b1c0c4dbe5be73a8e1595d5937 Mon Sep 17 00:00:00 2001 From: divyaac Date: Tue, 9 Apr 2024 12:35:39 -0700 Subject: [PATCH 1/4] Auto Auth Healing for Proxy (#26307) * Auto Auth Healing for Proxy * Edited changelog * Fix failing tests and small comment change * Readded check because proxy cache is initialized with inmem sink --- changelog/26307.txt | 4 + command/agent.go | 176 ++-- command/agent/cache_end_to_end_test.go | 2 +- command/agentproxyshared/cache/api_proxy.go | 2 +- .../agentproxyshared/cache/api_proxy_test.go | 4 +- command/agentproxyshared/cache/cache_test.go | 4 +- command/agentproxyshared/cache/handler.go | 26 +- .../cache/static_secret_cache_updater.go | 27 +- .../cache/static_secret_cache_updater_test.go | 131 +++ command/proxy.go | 128 +-- command/proxy_test.go | 895 +++++++++++++++++- 11 files changed, 1243 insertions(+), 156 deletions(-) create mode 100644 changelog/26307.txt diff --git a/changelog/26307.txt b/changelog/26307.txt new file mode 100644 index 000000000000..1684c8d42805 --- /dev/null +++ b/changelog/26307.txt @@ -0,0 +1,4 @@ +```release-note:improvement +proxy: Proxy will re-trigger auto auth if the token used for requests has been revoked, has exceeded the number of uses, +or is an otherwise invalid value. +``` diff --git a/command/agent.go b/command/agent.go index 360a66b79147..3f9e7b141785 100644 --- a/command/agent.go +++ b/command/agent.go @@ -16,6 +16,7 @@ import ( "sort" "strings" "sync" + "sync/atomic" "time" systemd "github.com/coreos/go-systemd/daemon" @@ -540,6 +541,83 @@ func (c *AgentCommand) Run(args []string) int { } } + // Create the AuthHandler, SinkServer, TemplateServer, and ExecServer now so that we can pass AuthHandler struct + // values into the Proxy http.Handler. We will wait to actually start these servers + // once we have configured the handlers for each listener below + authInProgress := &atomic.Bool{} + invalidTokenErrCh := make(chan error) + var ah *auth.AuthHandler + var ss *sink.SinkServer + var ts *template.Server + var es *exec.Server + if method != nil { + enableTemplateTokenCh := len(config.Templates) > 0 + enableEnvTemplateTokenCh := len(config.EnvTemplates) > 0 + + // Auth Handler is going to set its own retry values, so we want to + // work on a copy of the client to not affect other subsystems. 
+ ahClient, err := c.client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err)) + return 1 + } + + // Override the set namespace with the auto-auth specific namespace + if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" { + ahClient.SetNamespace(config.AutoAuth.Method.Namespace) + } + + if config.DisableIdleConnsAutoAuth { + ahClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAutoAuth { + ahClient.SetDisableKeepAlives(true) + } + + ah = auth.NewAuthHandler(&auth.AuthHandlerConfig{ + Logger: c.logger.Named("auth.handler"), + Client: ahClient, + WrapTTL: config.AutoAuth.Method.WrapTTL, + MinBackoff: config.AutoAuth.Method.MinBackoff, + MaxBackoff: config.AutoAuth.Method.MaxBackoff, + EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials, + EnableTemplateTokenCh: enableTemplateTokenCh, + EnableExecTokenCh: enableEnvTemplateTokenCh, + Token: previousToken, + ExitOnError: config.AutoAuth.Method.ExitOnError, + UserAgent: useragent.AgentAutoAuthString(), + MetricsSignifier: "agent", + }) + + ss = sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: c.logger.Named("sink.server"), + Client: ahClient, + ExitAfterAuth: config.ExitAfterAuth, + }) + + ts = template.NewServer(&template.ServerConfig{ + Logger: c.logger.Named("template.server"), + LogLevel: c.logger.GetLevel(), + LogWriter: c.logWriter, + AgentConfig: c.config, + Namespace: templateNamespace, + ExitAfterAuth: config.ExitAfterAuth, + }) + + es, err = exec.NewServer(&exec.ServerConfig{ + AgentConfig: c.config, + Namespace: templateNamespace, + Logger: c.logger.Named("exec.server"), + LogLevel: c.logger.GetLevel(), + LogWriter: c.logWriter, + }) + if err != nil { + c.logger.Error("could not create exec server", "error", err) + return 1 + } + } + var listeners []net.Listener // If there are templates, add an in-process listener @@ -578,31 +656,28 @@ func (c *AgentCommand) Run(args []string) int { listeners = append(listeners, ln) proxyVaultToken := true - var inmemSink sink.Sink + apiProxyLogger.Debug("auto-auth token is allowed to be used; configuring inmem sink") + inmemSink, err := inmem.New(&sink.SinkConfig{ + Logger: apiProxyLogger, + }, leaseCache) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) + c.tlsReloadFuncsLock.Unlock() + return 1 + } + sinks = append(sinks, &sink.SinkConfig{ + Logger: apiProxyLogger, + Sink: inmemSink, + }) if config.APIProxy != nil { - if config.APIProxy.UseAutoAuthToken { - apiProxyLogger.Debug("auto-auth token is allowed to be used; configuring inmem sink") - inmemSink, err = inmem.New(&sink.SinkConfig{ - Logger: apiProxyLogger, - }, leaseCache) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) - c.tlsReloadFuncsLock.Unlock() - return 1 - } - sinks = append(sinks, &sink.SinkConfig{ - Logger: apiProxyLogger, - Sink: inmemSink, - }) - } proxyVaultToken = !config.APIProxy.ForceAutoAuthToken } var muxHandler http.Handler if leaseCache != nil { - muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken) + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken, authInProgress, invalidTokenErrCh) } else { - muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken) + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken, authInProgress, invalidTokenErrCh) } // Parse 
'require_request_header' listener config option, and wrap @@ -708,71 +783,6 @@ func (c *AgentCommand) Run(args []string) int { // Start auto-auth and sink servers if method != nil { - enableTemplateTokenCh := len(config.Templates) > 0 - enableEnvTemplateTokenCh := len(config.EnvTemplates) > 0 - - // Auth Handler is going to set its own retry values, so we want to - // work on a copy of the client to not affect other subsystems. - ahClient, err := c.client.CloneWithHeaders() - if err != nil { - c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err)) - return 1 - } - - // Override the set namespace with the auto-auth specific namespace - if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" { - ahClient.SetNamespace(config.AutoAuth.Method.Namespace) - } - - if config.DisableIdleConnsAutoAuth { - ahClient.SetMaxIdleConnections(-1) - } - - if config.DisableKeepAlivesAutoAuth { - ahClient.SetDisableKeepAlives(true) - } - - ah := auth.NewAuthHandler(&auth.AuthHandlerConfig{ - Logger: c.logger.Named("auth.handler"), - Client: ahClient, - WrapTTL: config.AutoAuth.Method.WrapTTL, - MinBackoff: config.AutoAuth.Method.MinBackoff, - MaxBackoff: config.AutoAuth.Method.MaxBackoff, - EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials, - EnableTemplateTokenCh: enableTemplateTokenCh, - EnableExecTokenCh: enableEnvTemplateTokenCh, - Token: previousToken, - ExitOnError: config.AutoAuth.Method.ExitOnError, - UserAgent: useragent.AgentAutoAuthString(), - MetricsSignifier: "agent", - }) - - ss := sink.NewSinkServer(&sink.SinkServerConfig{ - Logger: c.logger.Named("sink.server"), - Client: ahClient, - ExitAfterAuth: config.ExitAfterAuth, - }) - - ts := template.NewServer(&template.ServerConfig{ - Logger: c.logger.Named("template.server"), - LogLevel: c.logger.GetLevel(), - LogWriter: c.logWriter, - AgentConfig: c.config, - Namespace: templateNamespace, - ExitAfterAuth: config.ExitAfterAuth, - }) - - es, err := exec.NewServer(&exec.ServerConfig{ - AgentConfig: c.config, - Namespace: templateNamespace, - Logger: c.logger.Named("exec.server"), - LogLevel: c.logger.GetLevel(), - LogWriter: c.logWriter, - }) - if err != nil { - c.logger.Error("could not create exec server", "error", err) - return 1 - } g.Add(func() error { return ah.Run(ctx, method) diff --git a/command/agent/cache_end_to_end_test.go b/command/agent/cache_end_to_end_test.go index 9db56cb6f056..7fa6c0fc23a0 100644 --- a/command/agent/cache_end_to_end_test.go +++ b/command/agent/cache_end_to_end_test.go @@ -319,7 +319,7 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) // Passing a non-nil inmemsink tells the agent to use the auto-auth token - mux.Handle("/", cache.ProxyHandler(ctx, cacheLogger, leaseCache, inmemSink, true)) + mux.Handle("/", cache.ProxyHandler(ctx, cacheLogger, leaseCache, inmemSink, true, nil, nil)) server := &http.Server{ Handler: mux, ReadHeaderTimeout: 10 * time.Second, diff --git a/command/agentproxyshared/cache/api_proxy.go b/command/agentproxyshared/cache/api_proxy.go index 35aea9f1e671..6cc674ee02dd 100644 --- a/command/agentproxyshared/cache/api_proxy.go +++ b/command/agentproxyshared/cache/api_proxy.go @@ -9,7 +9,7 @@ import ( gohttp "net/http" "sync" - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-retryablehttp" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/namespace" diff --git 
a/command/agentproxyshared/cache/api_proxy_test.go b/command/agentproxyshared/cache/api_proxy_test.go index 9e7035918cb3..234c6ae6edb7 100644 --- a/command/agentproxyshared/cache/api_proxy_test.go +++ b/command/agentproxyshared/cache/api_proxy_test.go @@ -285,9 +285,9 @@ func setupClusterAndAgentCommon(ctx context.Context, t *testing.T, coreConfig *v mux.Handle("/agent/v1/cache-clear", leaseCache.HandleCacheClear(ctx)) - mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, nil, true)) + mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, nil, true, nil, nil)) } else { - mux.Handle("/", ProxyHandler(ctx, apiProxyLogger, apiProxy, nil, true)) + mux.Handle("/", ProxyHandler(ctx, apiProxyLogger, apiProxy, nil, true, nil, nil)) } server := &http.Server{ diff --git a/command/agentproxyshared/cache/cache_test.go b/command/agentproxyshared/cache/cache_test.go index 12e1e18e3a40..f19267e055d2 100644 --- a/command/agentproxyshared/cache/cache_test.go +++ b/command/agentproxyshared/cache/cache_test.go @@ -81,7 +81,7 @@ func TestCache_AutoAuthTokenStripping(t *testing.T) { mux := http.NewServeMux() mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) - mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink("testid"), true)) + mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink("testid"), true, nil, nil)) server := &http.Server{ Handler: mux, ReadHeaderTimeout: 10 * time.Second, @@ -170,7 +170,7 @@ func TestCache_AutoAuthClientTokenProxyStripping(t *testing.T) { mux := http.NewServeMux() // mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) - mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink(realToken), false)) + mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink(realToken), false, nil, nil)) server := &http.Server{ Handler: mux, ReadHeaderTimeout: 10 * time.Second, diff --git a/command/agentproxyshared/cache/handler.go b/command/agentproxyshared/cache/handler.go index 25acaee01cb8..107c38402460 100644 --- a/command/agentproxyshared/cache/handler.go +++ b/command/agentproxyshared/cache/handler.go @@ -13,6 +13,8 @@ import ( "io" "io/ioutil" "net/http" + "strings" + "sync/atomic" "time" "github.com/armon/go-metrics" @@ -23,7 +25,7 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -func ProxyHandler(ctx context.Context, logger hclog.Logger, proxier Proxier, inmemSink sink.Sink, proxyVaultToken bool) http.Handler { +func ProxyHandler(ctx context.Context, logger hclog.Logger, proxier Proxier, inmemSink sink.Sink, proxyVaultToken bool, authInProgress *atomic.Bool, invalidTokenErrCh chan error) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { logger.Info("received request", "method", r.Method, "path", r.URL.Path) @@ -33,9 +35,13 @@ func ProxyHandler(ctx context.Context, logger hclog.Logger, proxier Proxier, inm token := r.Header.Get(consts.AuthHeaderName) - if token == "" && inmemSink != nil { - logger.Debug("using auto auth token", "method", r.Method, "path", r.URL.Path) - token = inmemSink.(sink.SinkReader).Token() + var autoAuthToken string + if inmemSink != nil { + autoAuthToken = inmemSink.(sink.SinkReader).Token() + if token == "" { + logger.Debug("using auto auth token", "method", r.Method, "path", r.URL.Path) + token = autoAuthToken + } } // Parse and reset body. 
@@ -59,10 +65,22 @@ func ProxyHandler(ctx context.Context, logger hclog.Logger, proxier Proxier, inm if err != nil { // If this is an api.Response error, don't wrap the response. if resp != nil && resp.Response.Error() != nil { + responseErrMessage := resp.Response.Error() copyHeader(w.Header(), resp.Response.Header) w.WriteHeader(resp.Response.StatusCode) io.Copy(w, resp.Response.Body) metrics.IncrCounter([]string{"agent", "proxy", "client_error"}, 1) + // Re-trigger auto auth if the token is the same as the auto auth token + if resp.Response.StatusCode == 403 && strings.Contains(responseErrMessage.Error(), logical.ErrInvalidToken.Error()) && + autoAuthToken == token && !authInProgress.Load() { + // Drain the error channel first + logger.Info("proxy received an invalid token error") + select { + case <-invalidTokenErrCh: + default: + } + invalidTokenErrCh <- resp.Response.Error() + } } else { metrics.IncrCounter([]string{"agent", "proxy", "error"}, 1) logical.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get the response: %w", err)) diff --git a/command/agentproxyshared/cache/static_secret_cache_updater.go b/command/agentproxyshared/cache/static_secret_cache_updater.go index fbae0f3f24d7..dc001f241996 100644 --- a/command/agentproxyshared/cache/static_secret_cache_updater.go +++ b/command/agentproxyshared/cache/static_secret_cache_updater.go @@ -9,8 +9,11 @@ import ( "encoding/json" "errors" "fmt" + "io" "net/http" "net/url" + "strings" + "sync/atomic" "time" "github.com/hashicorp/go-hclog" @@ -20,6 +23,7 @@ import ( "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" "github.com/hashicorp/vault/command/agentproxyshared/sink" "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/sdk/logical" "golang.org/x/exp/maps" "nhooyr.io/websocket" ) @@ -359,13 +363,23 @@ func (updater *StaticSecretCacheUpdater) openWebSocketConnection(ctx context.Con } if err != nil { + errMessage := err.Error() if resp != nil { if resp.StatusCode == http.StatusNotFound { return nil, fmt.Errorf("received 404 when opening web socket to %s, ensure Vault is Enterprise version 1.16 or above", wsURL) } + if resp.StatusCode == http.StatusForbidden { + var errBytes []byte + errBytes, err = io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, fmt.Errorf("error occured when attempting to read error response from Vault server") + } + errMessage = string(errBytes) + } } return nil, fmt.Errorf("error returned when opening event stream web socket to %s, ensure auto-auth token"+ - " has correct permissions and Vault is Enterprise version 1.16 or above: %w", wsURL, err) + " has correct permissions and Vault is Enterprise version 1.16 or above: %s", wsURL, errMessage) } if conn == nil { @@ -379,7 +393,7 @@ func (updater *StaticSecretCacheUpdater) openWebSocketConnection(ctx context.Con // Once a token is provided to the sink, we will start the websocket and start consuming // events and updating secrets. // Run will shut down gracefully when the context is cancelled. 
-func (updater *StaticSecretCacheUpdater) Run(ctx context.Context) error { +func (updater *StaticSecretCacheUpdater) Run(ctx context.Context, authRenewalInProgress *atomic.Bool, invalidTokenErrCh chan error) error { updater.logger.Info("starting static secret cache updater subsystem") defer func() { updater.logger.Info("static secret cache updater subsystem stopped") @@ -415,6 +429,15 @@ tokenLoop: if err != nil { updater.logger.Error("error occurred during streaming static secret cache update events", "err", err) shouldBackoff = true + if strings.Contains(err.Error(), logical.ErrInvalidToken.Error()) && !authRenewalInProgress.Load() { + // Drain the channel in case there is an error that has already been sent but not received + select { + case <-invalidTokenErrCh: + default: + } + updater.logger.Error("received invalid token error while opening websocket") + invalidTokenErrCh <- err + } continue } } diff --git a/command/agentproxyshared/cache/static_secret_cache_updater_test.go b/command/agentproxyshared/cache/static_secret_cache_updater_test.go index f77ad4168a60..51c53b2fc335 100644 --- a/command/agentproxyshared/cache/static_secret_cache_updater_test.go +++ b/command/agentproxyshared/cache/static_secret_cache_updater_test.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "sync" + syncatomic "sync/atomic" "testing" "time" @@ -156,6 +157,136 @@ func TestOpenWebSocketConnection(t *testing.T) { } } +// TestOpenWebSocketConnection_BadPolicyToken tests attempting to open a websocket +// connection to the events system using a token that has incorrect policy access +// will not trigger auto auth +func TestOpenWebSocketConnection_BadPolicyToken(t *testing.T) { + // We need a valid cluster for the connection to succeed. + cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + + eventPolicy := `path "sys/events/subscribe/*" { + capabilities = ["deny"] + }` + client.Sys().PutPolicy("no_events_access", eventPolicy) + + // Create a new token with a bad policy + token, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"no_events_access"}, + }) + require.NoError(t, err) + + // Set the client token to one with an invalid policy + updater.tokenSink.WriteToken(token.Auth.ClientToken) + client.SetToken(token.Auth.ClientToken) + + ctx, cancelFunc := context.WithCancel(context.Background()) + + authInProgress := &syncatomic.Bool{} + renewalChannel := make(chan error) + errCh := make(chan error) + go func() { + errCh <- updater.Run(ctx, authInProgress, renewalChannel) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + defer cancelFunc() + + // Verify that the token has been written to the sink before checking auto auth + // is not re-triggered + err = updater.streamStaticSecretEvents(ctx) + require.ErrorContains(t, err, logical.ErrPermissionDenied.Error()) + + // Auto auth should not be retriggered + timeout := time.After(2 * time.Second) + select { + case <-renewalChannel: + t.Fatal("incorrectly triggered auto auth") + case <-ctx.Done(): + t.Fatal("context was closed before auto auth could be re-triggered") + case <-timeout: + } +} + +// TestOpenWebSocketConnection_AutoAuthSelfHeal tests attempting to open a websocket +// connection to the events system using an invalid token will re-trigger +// auto auth. 
+func TestOpenWebSocketConnection_AutoAuthSelfHeal(t *testing.T) { + // We need a valid cluster for the connection to succeed. + cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + + // Revoke the token before it can be used to open a connection to the events system + client.Auth().Token().RevokeOrphan(client.Token()) + updater.tokenSink.WriteToken(client.Token()) + time.Sleep(100 * time.Millisecond) + + ctx, cancelFunc := context.WithCancel(context.Background()) + + authInProgress := &syncatomic.Bool{} + renewalChannel := make(chan error) + errCh := make(chan error) + go func() { + errCh <- updater.Run(ctx, authInProgress, renewalChannel) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + defer cancelFunc() + + // Wait for static secret updater to begin + timeout := time.After(10 * time.Second) + + select { + case <-renewalChannel: + case <-ctx.Done(): + t.Fatal("context was closed before auto auth could be re-triggered") + case <-timeout: + t.Fatal("timed out before auto auth could be re-triggered") + } + authInProgress.Store(false) + + // Verify that auto auth is re-triggered again because another auth is "not in progress" + timeout = time.After(15 * time.Second) + select { + case <-renewalChannel: + case <-ctx.Done(): + t.Fatal("context was closed before auto auth could be re-triggered") + case <-timeout: + t.Fatal("timed out before auto auth could be re-triggered") + } + authInProgress.Store(true) + + // Verify that auto auth is NOT re-triggered again because another auth is in progress + timeout = time.After(2 * time.Second) + select { + case <-renewalChannel: + t.Fatal("auto auth was incorrectly re-triggered") + case <-ctx.Done(): + t.Fatal("context was closed before auto auth could be re-triggered") + case <-timeout: + } +} + // TestOpenWebSocketConnectionReceivesEventsDefaultMount tests that the openWebSocketConnection function // works as expected with the default KVV1 mount, and then the connection can be used to receive an event. // This acts as more of an event system sanity check than a test of the updater diff --git a/command/proxy.go b/command/proxy.go index 5bc08c04f80e..3e06f15dd623 100644 --- a/command/proxy.go +++ b/command/proxy.go @@ -15,6 +15,7 @@ import ( "sort" "strings" "sync" + "sync/atomic" "time" systemd "github.com/coreos/go-systemd/daemon" @@ -529,6 +530,58 @@ func (c *ProxyCommand) Run(args []string) int { } } + // Create the AuthHandler and the Sink Server so that we can pass AuthHandler struct + // values into the Proxy http.Handler. We will wait to actually start these servers + // once we have configured handlers for each listener below + authInProgress := &atomic.Bool{} + invalidTokenErrCh := make(chan error) + var ah *auth.AuthHandler + var ss *sink.SinkServer + if method != nil { + // Auth Handler is going to set its own retry values, so we want to + // work on a copy of the client to not affect other subsystems. 
+ ahClient, err := c.client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err)) + return 1 + } + + // Override the set namespace with the auto-auth specific namespace + if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" { + ahClient.SetNamespace(config.AutoAuth.Method.Namespace) + } + + if config.DisableIdleConnsAutoAuth { + ahClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAutoAuth { + ahClient.SetDisableKeepAlives(true) + } + + ah = auth.NewAuthHandler(&auth.AuthHandlerConfig{ + Logger: c.logger.Named("auth.handler"), + Client: ahClient, + WrapTTL: config.AutoAuth.Method.WrapTTL, + MinBackoff: config.AutoAuth.Method.MinBackoff, + MaxBackoff: config.AutoAuth.Method.MaxBackoff, + EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials, + Token: previousToken, + ExitOnError: config.AutoAuth.Method.ExitOnError, + UserAgent: useragent.ProxyAutoAuthString(), + MetricsSignifier: "proxy", + }) + + authInProgress = ah.AuthInProgress + invalidTokenErrCh = ah.InvalidToken + + ss = sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: c.logger.Named("sink.server"), + Client: ahClient, + ExitAfterAuth: config.ExitAfterAuth, + }) + } + var listeners []net.Listener // Ensure we've added all the reload funcs for TLS before anyone triggers a reload. @@ -561,32 +614,29 @@ func (c *ProxyCommand) Run(args []string) int { listeners = append(listeners, ln) + apiProxyLogger.Debug("configuring inmem auto-auth sink") + inmemSink, err := inmem.New(&sink.SinkConfig{ + Logger: apiProxyLogger, + }, leaseCache) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) + c.tlsReloadFuncsLock.Unlock() + return 1 + } + sinks = append(sinks, &sink.SinkConfig{ + Logger: apiProxyLogger, + Sink: inmemSink, + }) proxyVaultToken := true - var inmemSink sink.Sink if config.APIProxy != nil { - if config.APIProxy.UseAutoAuthToken { - apiProxyLogger.Debug("configuring inmem auto-auth sink") - inmemSink, err = inmem.New(&sink.SinkConfig{ - Logger: apiProxyLogger, - }, leaseCache) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) - c.tlsReloadFuncsLock.Unlock() - return 1 - } - sinks = append(sinks, &sink.SinkConfig{ - Logger: apiProxyLogger, - Sink: inmemSink, - }) - } proxyVaultToken = !config.APIProxy.ForceAutoAuthToken } var muxHandler http.Handler if leaseCache != nil { - muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken) + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken, authInProgress, invalidTokenErrCh) } else { - muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken) + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken, authInProgress, invalidTokenErrCh) } // Parse 'require_request_header' listener config option, and wrap @@ -692,46 +742,6 @@ func (c *ProxyCommand) Run(args []string) int { // Start auto-auth and sink servers if method != nil { - // Auth Handler is going to set its own retry values, so we want to - // work on a copy of the client to not affect other subsystems. 
- ahClient, err := c.client.CloneWithHeaders() - if err != nil { - c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err)) - return 1 - } - - // Override the set namespace with the auto-auth specific namespace - if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" { - ahClient.SetNamespace(config.AutoAuth.Method.Namespace) - } - - if config.DisableIdleConnsAutoAuth { - ahClient.SetMaxIdleConnections(-1) - } - - if config.DisableKeepAlivesAutoAuth { - ahClient.SetDisableKeepAlives(true) - } - - ah := auth.NewAuthHandler(&auth.AuthHandlerConfig{ - Logger: c.logger.Named("auth.handler"), - Client: ahClient, - WrapTTL: config.AutoAuth.Method.WrapTTL, - MinBackoff: config.AutoAuth.Method.MinBackoff, - MaxBackoff: config.AutoAuth.Method.MaxBackoff, - EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials, - Token: previousToken, - ExitOnError: config.AutoAuth.Method.ExitOnError, - UserAgent: useragent.ProxyAutoAuthString(), - MetricsSignifier: "proxy", - }) - - ss := sink.NewSinkServer(&sink.SinkServerConfig{ - Logger: c.logger.Named("sink.server"), - Client: ahClient, - ExitAfterAuth: config.ExitAfterAuth, - }) - g.Add(func() error { return ah.Run(ctx, method) }, func(error) { @@ -773,7 +783,7 @@ func (c *ProxyCommand) Run(args []string) int { // Add the static secret cache updater, if appropriate if updater != nil { g.Add(func() error { - err := updater.Run(ctx) + err := updater.Run(ctx, authInProgress, invalidTokenErrCh) return err }, func(error) { cancelFunc() diff --git a/command/proxy_test.go b/command/proxy_test.go index cffc93b7507a..6edaa5d5bf2f 100644 --- a/command/proxy_test.go +++ b/command/proxy_test.go @@ -240,6 +240,896 @@ auto_auth { } } +// TestProxy_NoTriggerAutoAuth_BadPolicy tests that auto auth is not re-triggered +// if Proxy uses a token with incorrect policy access. 
+func TestProxy_NoTriggerAutoAuth_BadPolicy(t *testing.T) { + proxyLogger := logging.NewVaultLogger(hclog.Trace) + vaultLogger := logging.NewVaultLogger(hclog.Info) + cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.Handler, + Logger: vaultLogger, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Add a secret to the KV engine + _, err := serverClient.Logical().Write("secret/foo", map[string]interface{}{"user": "something"}) + require.NoError(t, err) + + // Create kv read policy + noKvAccess := `path "secret/*" { +capabilities = ["deny"] +}` + err = serverClient.Sys().PutPolicy("noKvAccess", noKvAccess) + require.NoError(t, err) + + // Create a token with that policy + opts := &api.TokenCreateRequest{Policies: []string{"noKvAccess"}} + tokenResp, err := serverClient.Auth().Token().Create(opts) + require.NoError(t, err) + firstToken := tokenResp.Auth.ClientToken + + // Create token file + tokenFileName := makeTempFile(t, "token-file", firstToken) + defer os.Remove(tokenFileName) + + sinkf, err := os.CreateTemp("", "sink.test.") + if err != nil { + t.Fatal(err) + } + sink := sinkf.Name() + sinkf.Close() + os.Remove(sink) + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method { + type = "token_file" + config = { + token_file_path = "%s" + } + } + sink "file" { + config = { + path = "%s" + } + } +}`, tokenFileName, sink) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` + listener "tcp" { + address = "%s" + tls_disable = true + } + `, listenAddr) + + config := fmt.Sprintf(` + vault { + address = "%s" + tls_skip_verify = true + } + api_proxy { + use_auto_auth_token = "force" + } + %s + %s + `, serverClient.Address(), listenConfig, autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Start proxy + _, cmd := testProxyCommand(t, proxyLogger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + // Validate that the auto-auth token has been correctly attained + // and works for LookupSelf + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if err != nil { + t.Fatal(err) + } + + proxyClient.SetToken("") + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + // Wait for re-triggered auto auth to write new token to sink + waitForFile := func(prevModTime time.Time) time.Time { + ticker := time.Tick(100 * time.Millisecond) + timeout := time.After(15 * time.Second) + for { + select { + case <-ticker: + case <-timeout: + return prevModTime + } + modTime, err := os.Stat(sink) + require.NoError(t, err) + if modTime.ModTime().After(prevModTime) { + return modTime.ModTime() + } + } + } + + // Wait for the token to be sent to syncs and be available to be used + initialModTime := waitForFile(time.Time{}) + req := proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + _ = request(t, proxyClient, req, 200) + + // Write a new token to the token file + newTokenResp, err := 
serverClient.Auth().Token().Create(&api.TokenCreateRequest{}) + require.NoError(t, err) + secondToken := newTokenResp.Auth.ClientToken + err = os.WriteFile(tokenFileName, []byte(secondToken), 0o600) + require.NoError(t, err) + + // Make a request to a path that the token does not have access to + req = proxyClient.NewRequest("GET", "/v1/secret/foo") + _, err = proxyClient.RawRequest(req) + require.Error(t, err) + require.ErrorContains(t, err, logical.ErrPermissionDenied.Error()) + require.NotContains(t, err.Error(), logical.ErrInvalidToken.Error()) + + // Sleep for a bit to ensure that auto auth is not re-triggered + newModTime := waitForFile(initialModTime) + if newModTime.After(initialModTime) { + t.Fatal("auto auth was incorrectly re-triggered") + } + + // Read from the sink file and verify that the token has not changed + newToken, err := os.ReadFile(sink) + require.Equal(t, firstToken, string(newToken)) + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_NoTriggerAutoAuth_ProxyTokenNotAutoAuth tests that auto auth is not re-triggered +// if Proxy uses a token that is not equal to the auto auth token +func TestProxy_NoTriggerAutoAuth_ProxyTokenNotAutoAuth(t *testing.T) { + proxyLogger := logging.NewVaultLogger(hclog.Info) + vaultLogger := logging.NewVaultLogger(hclog.Info) + cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.Handler, + Logger: vaultLogger, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Create a token + tokenResp, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{}) + require.NoError(t, err) + firstToken := tokenResp.Auth.ClientToken + + // Create token file + tokenFileName := makeTempFile(t, "token-file", firstToken) + defer os.Remove(tokenFileName) + + sinkf, err := os.CreateTemp("", "sink.test.") + if err != nil { + t.Fatal(err) + } + sink := sinkf.Name() + sinkf.Close() + os.Remove(sink) + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method { + type = "token_file" + config = { + token_file_path = "%s" + } + } + sink "file" { + config = { + path = "%s" + } + } +}`, tokenFileName, sink) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` + listener "tcp" { + address = "%s" + tls_disable = true + } + `, listenAddr) + + // Do not use the auto auth token if a token is provided with the proxy client + config := fmt.Sprintf(` + vault { + address = "%s" + tls_skip_verify = true + } + api_proxy { + use_auto_auth_token = true + } + %s + %s + `, serverClient.Address(), listenConfig, autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Start proxy + _, cmd := testProxyCommand(t, proxyLogger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + // Validate that the auto-auth token has been correctly attained + // and works for LookupSelf + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if err != nil { + t.Fatal(err) + } + + proxyClient.SetToken(firstToken) + err = 
proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + // Wait for re-triggered auto auth to write new token to sink + waitForFile := func(prevModTime time.Time) time.Time { + ticker := time.Tick(100 * time.Millisecond) + timeout := time.After(15 * time.Second) + for { + select { + case <-ticker: + case <-timeout: + return prevModTime + } + modTime, err := os.Stat(sink) + require.NoError(t, err) + if modTime.ModTime().After(prevModTime) { + return modTime.ModTime() + } + } + } + + // Wait for the token is available to be used + createTime := waitForFile(time.Time{}) + require.NoError(t, err) + req := proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + _, err = proxyClient.RawRequest(req) + require.NoError(t, err) + + // Revoke token + req = serverClient.NewRequest("PUT", "/v1/auth/token/revoke") + req.BodyBytes = []byte(fmt.Sprintf(`{ + "token": "%s" + }`, firstToken)) + _ = request(t, serverClient, req, 204) + + // Write a new token to the token file + newTokenResp, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{}) + require.NoError(t, err) + secondToken := newTokenResp.Auth.ClientToken + err = os.WriteFile(tokenFileName, []byte(secondToken), 0o600) + require.NoError(t, err) + + // Proxy uses revoked token to make request and should result in an error + proxyClient.SetToken("random token") + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + _, err = proxyClient.RawRequest(req) + require.Error(t, err) + + // Wait to see if the sink file is modified + newModTime := waitForFile(createTime) + if newModTime.After(createTime) { + t.Fatal("auto auth was incorrectly re-triggered") + } + + // Read from the sink and verify that the token has not changed + newToken, err := os.ReadFile(sink) + require.Equal(t, firstToken, string(newToken)) + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_ReTriggerAutoAuth_ForceAutoAuthToken tests that auto auth is re-triggered +// if Proxy always forcibly uses the auto auth token +func TestProxy_ReTriggerAutoAuth_ForceAutoAuthToken(t *testing.T) { + proxyLogger := logging.NewVaultLogger(hclog.Trace) + vaultLogger := logging.NewVaultLogger(hclog.Info) + cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.Handler, + Logger: vaultLogger, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Create a token + tokenResp, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{}) + require.NoError(t, err) + firstToken := tokenResp.Auth.ClientToken + + // Create token file + tokenFileName := makeTempFile(t, "token-file", firstToken) + defer os.Remove(tokenFileName) + + sinkf, err := os.CreateTemp("", "sink.test.") + if err != nil { + t.Fatal(err) + } + sink := sinkf.Name() + sinkf.Close() + os.Remove(sink) + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method { + type = "token_file" + config = { + token_file_path = "%s" + } + } + sink "file" { + config = { + path = "%s" + } + } +}`, tokenFileName, sink) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` + listener "tcp" { + address = "%s" + tls_disable = true + } + `, listenAddr) + + // Do not use the auto auth token if a token is provided with the proxy client + config := fmt.Sprintf(` + vault { + address = "%s" + tls_skip_verify = true + } + api_proxy { + use_auto_auth_token = "force" + } + %s + %s + `, serverClient.Address(), listenConfig, autoAuthConfig) + configPath := 
makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Start proxy + _, cmd := testProxyCommand(t, proxyLogger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + // Validate that the auto-auth token has been correctly attained + // and works for LookupSelf + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if err != nil { + t.Fatal(err) + } + + proxyClient.SetToken(firstToken) + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + // Wait for re-triggered auto auth to write new token to sink + waitForFile := func(prevModTime time.Time) time.Time { + ticker := time.Tick(100 * time.Millisecond) + timeout := time.After(15 * time.Second) + for { + select { + case <-ticker: + case <-timeout: + return prevModTime + } + modTime, err := os.Stat(sink) + require.NoError(t, err) + if modTime.ModTime().After(prevModTime) { + return modTime.ModTime() + } + } + } + + // Wait for the token is available to be used + createTime := waitForFile(time.Time{}) + require.NoError(t, err) + req := proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + _, err = proxyClient.RawRequest(req) + require.NoError(t, err) + + // Revoke token + req = serverClient.NewRequest("PUT", "/v1/auth/token/revoke") + req.BodyBytes = []byte(fmt.Sprintf(`{ + "token": "%s" + }`, firstToken)) + _ = request(t, serverClient, req, 204) + + // Create new token + newTokenResp, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{}) + require.NoError(t, err) + secondToken := newTokenResp.Auth.ClientToken + + // Proxy uses the same token in the token file to make a request, which should result in error + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + _, err = proxyClient.RawRequest(req) + require.Error(t, err) + + // Write a new token to the token file so that auto auth can write new token to sink + err = os.WriteFile(tokenFileName, []byte(secondToken), 0o600) + require.NoError(t, err) + + // Wait to see if that the sink file is modified + waitForFile(createTime) + + // Read from the sink and verify that the sink contains the new token + newToken, err := os.ReadFile(sink) + require.Equal(t, secondToken, string(newToken)) + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_ReTriggerAutoAuth_ProxyIsAutoAuthToken tests that auto auth is re-triggered +// the proxy client uses a token that is equal to the auto auth token +func TestProxy_ReTriggerAutoAuth_ProxyIsAutoAuthToken(t *testing.T) { + proxyLogger := logging.NewVaultLogger(hclog.Trace) + vaultLogger := logging.NewVaultLogger(hclog.Info) + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + }, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.Handler, + Logger: vaultLogger, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Enable the approle auth method + req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") + req.BodyBytes = []byte(`{ + "type": "approle" + 
}`) + request(t, serverClient, req, 204) + + // Create a named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") + req.BodyBytes = []byte(`{ + "secret_id_num_uses": "10", + "secret_id_ttl": "1m", + "token_max_ttl": "4m", + "token_num_uses": "10", + "token_ttl": "4m", + "policies": "default" + }`) + request(t, serverClient, req, 204) + + // Fetch the RoleID of the named role + req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") + body := request(t, serverClient, req, 200) + data := body["data"].(map[string]interface{}) + roleID := data["role_id"].(string) + + // Get a SecretID issued against the named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") + body = request(t, serverClient, req, 200) + data = body["data"].(map[string]interface{}) + secretID := data["secret_id"].(string) + + // Write the RoleID and SecretID to temp files + roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") + secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") + defer os.Remove(roleIDPath) + defer os.Remove(secretIDPath) + + sinkf, err := os.CreateTemp("", "sink.test.") + if err != nil { + t.Fatal(err) + } + sink := sinkf.Name() + sinkf.Close() + os.Remove(sink) + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +}`, roleIDPath, secretIDPath, sink) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +api_proxy { + use_auto_auth_token = true +} +%s +%s +`, serverClient.Address(), listenConfig, autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Start proxy + _, cmd := testProxyCommand(t, proxyLogger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // Validate that the auto-auth token has been correctly attained + // and works for LookupSelf + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if err != nil { + t.Fatal(err) + } + + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + // Wait for re-triggered auto auth to write new token to sink + waitForFile := func(prevModTime time.Time) { + ticker := time.Tick(100 * time.Millisecond) + timeout := time.After(15 * time.Second) + for { + select { + case <-ticker: + case <-timeout: + t.Fatal("timed out waiting for re-triggered auto auth to complete") + } + modTime, err := os.Stat(sink) + require.NoError(t, err) + if modTime.ModTime().After(prevModTime) { + return + } + } + } + + // Wait for the token to be sent to syncs and be available to be used + waitForFile(time.Time{}) + oldToken, err := os.ReadFile(sink) + require.NoError(t, err) + prevModTime, err := os.Stat(sink) + require.NoError(t, err) + + // Set proxy token + 
proxyClient.SetToken(string(oldToken)) + + // Make request using proxy client to test that token is valid + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + body = request(t, proxyClient, req, 200) + + // Revoke token + req = serverClient.NewRequest("PUT", "/v1/auth/token/revoke") + req.BodyBytes = []byte(fmt.Sprintf(`{ + "token": "%s" + }`, oldToken)) + body = request(t, serverClient, req, 204) + + // Proxy uses revoked token to make request and should result in an error + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + _, err = proxyClient.RawRequest(req) + require.Error(t, err) + + // Wait for new token to be written and available to use + waitForFile(prevModTime.ModTime()) + + // Verify new token is not equal to the old token + newToken, err := os.ReadFile(sink) + require.NoError(t, err) + require.NotEqual(t, string(newToken), string(oldToken)) + + // Verify that proxy no longer fails when making a request with the new token + proxyClient.SetToken(string(newToken)) + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + body = request(t, proxyClient, req, 200) + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_ReTriggerAutoAuth_RevokedToken tests that auto auth is re-triggered +// when Proxy uses a revoked auto auth token to make a request +func TestProxy_ReTriggerAutoAuth_RevokedToken(t *testing.T) { + proxyLogger := logging.NewVaultLogger(hclog.Trace) + vaultLogger := logging.NewVaultLogger(hclog.Info) + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + }, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.Handler, + Logger: vaultLogger, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Enable the approle auth method + req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") + req.BodyBytes = []byte(`{ + "type": "approle" + }`) + request(t, serverClient, req, 204) + + // Create a named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") + req.BodyBytes = []byte(`{ + "secret_id_num_uses": "10", + "secret_id_ttl": "1m", + "token_max_ttl": "4m", + "token_num_uses": "10", + "token_ttl": "4m", + "policies": "default" + }`) + request(t, serverClient, req, 204) + + // Fetch the RoleID of the named role + req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") + body := request(t, serverClient, req, 200) + data := body["data"].(map[string]interface{}) + roleID := data["role_id"].(string) + + // Get a SecretID issued against the named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") + body = request(t, serverClient, req, 200) + data = body["data"].(map[string]interface{}) + secretID := data["secret_id"].(string) + + // Write the RoleID and SecretID to temp files + roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") + secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") + defer os.Remove(roleIDPath) + defer os.Remove(secretIDPath) + + sinkf, err := os.CreateTemp("", "sink.test.") + if err != nil { + t.Fatal(err) + } + sink := sinkf.Name() + sinkf.Close() + os.Remove(sink) + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +}`, roleIDPath, secretIDPath, sink) + + listenAddr := 
generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +api_proxy { + use_auto_auth_token = "force" +} +%s +%s +`, serverClient.Address(), listenConfig, autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Start proxy + _, cmd := testProxyCommand(t, proxyLogger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // Validate that the auto-auth token has been correctly attained + // and works for LookupSelf + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if err != nil { + t.Fatal(err) + } + + proxyClient.SetToken("") + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + // Wait for re-triggered auto auth to write new token to sink + waitForFile := func(prevModTime time.Time) { + ticker := time.Tick(100 * time.Millisecond) + timeout := time.After(15 * time.Second) + for { + select { + case <-ticker: + case <-timeout: + t.Fatal("timed out waiting for re-triggered auto auth to complete") + } + modTime, err := os.Stat(sink) + require.NoError(t, err) + if modTime.ModTime().After(prevModTime) { + return + } + } + } + + // Wait for the token to be sent to syncs and be available to be used + waitForFile(time.Time{}) + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + body = request(t, proxyClient, req, 200) + + oldToken, err := os.ReadFile(sink) + require.NoError(t, err) + prevModTime, err := os.Stat(sink) + require.NoError(t, err) + + // Revoke token + req = serverClient.NewRequest("PUT", "/v1/auth/token/revoke") + req.BodyBytes = []byte(fmt.Sprintf(`{ + "token": "%s" + }`, oldToken)) + body = request(t, serverClient, req, 204) + + // Proxy uses revoked token to make request and should result in an error + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + _, err = proxyClient.RawRequest(req) + require.Error(t, err) + + // Wait for new token to be written and available to use + waitForFile(prevModTime.ModTime()) + + // Verify new token is not equal to the old token + newToken, err := os.ReadFile(sink) + require.NoError(t, err) + require.NotEqual(t, string(newToken), string(oldToken)) + + // Verify that proxy no longer fails when making a request + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + body = request(t, proxyClient, req, 200) + + close(cmd.ShutdownCh) + wg.Wait() +} + // TestProxy_AutoAuth_UserAgent tests that the User-Agent sent // to Vault by Vault Proxy is correct when performing Auto-Auth. 
// Uses the custom handler userAgentHandler (defined above) so @@ -687,9 +1577,9 @@ vault { // TestProxy_ApiProxy_Retry Tests the retry functionalities of Vault Proxy's API Proxy func TestProxy_ApiProxy_Retry(t *testing.T) { - //---------------------------------------------------- + // ---------------------------------------------------- // Start the server and proxy - //---------------------------------------------------- + // ---------------------------------------------------- logger := logging.NewVaultLogger(hclog.Trace) var h handler cluster := vault.NewTestCluster(t, @@ -730,6 +1620,7 @@ func TestProxy_ApiProxy_Retry(t *testing.T) { intRef := func(i int) *int { return &i } + // start test cases here testCases := map[string]struct { retries *int From fc7b3232e33aa1016c1f6f4aeaace6e11a4894b3 Mon Sep 17 00:00:00 2001 From: Peter Wilson Date: Tue, 9 Apr 2024 21:02:58 +0100 Subject: [PATCH 2/4] Update Test_NoAutoAuthSelfHealing_BadPolicy (#26323) --- .../agent/agent_auto_auth_self_heal_test.go | 169 ++++++++---------- 1 file changed, 76 insertions(+), 93 deletions(-) diff --git a/command/agent/agent_auto_auth_self_heal_test.go b/command/agent/agent_auto_auth_self_heal_test.go index d790af9dfeed..02849ff06e53 100644 --- a/command/agent/agent_auto_auth_self_heal_test.go +++ b/command/agent/agent_auto_auth_self_heal_test.go @@ -23,10 +23,7 @@ import ( "github.com/hashicorp/vault/command/agentproxyshared/sink/file" "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/helper/testhelpers/minimal" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/helper/pointerutil" - "github.com/hashicorp/vault/vault" "github.com/stretchr/testify/require" ) @@ -64,9 +61,7 @@ func TestAutoAuthSelfHealing_TokenFileAuth_SinkOutput(t *testing.T) { serverClient := cluster.Cores[0].Client // Create token - secret, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{ - Policies: []string{"test-autoauth"}, - }) + secret, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{}) require.NoError(t, err) require.NotNil(t, secret) require.NotNil(t, secret.Auth) @@ -143,9 +138,14 @@ func TestAutoAuthSelfHealing_TokenFileAuth_SinkOutput(t *testing.T) { StaticSecretRenderInt: secretRenderInterval, }, AutoAuth: &agentConfig.AutoAuth{ - Sinks: []*agentConfig.Sink{{Type: "file", Config: map[string]interface{}{ - "path": pathLookupSelf, - }}}, + Sinks: []*agentConfig.Sink{ + { + Type: "file", + Config: map[string]interface{}{ + "path": pathLookupSelf, + }, + }, + }, }, ExitAfterAuth: false, }, @@ -235,59 +235,59 @@ func TestAutoAuthSelfHealing_TokenFileAuth_SinkOutput(t *testing.T) { // is not re-triggered if a token with incorrect policy access // is used to render a template func Test_NoAutoAuthSelfHealing_BadPolicy(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{}, - &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - serverClient := cluster.Cores[0].Client + // Unset the environment variable so that agent picks up the right test cluster address + t.Setenv(api.EnvVaultAddress, "") - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) + tmpDir := 
t.TempDir() + pathKVData := filepath.Join(tmpDir, "kvData") + pathVaultToken := filepath.Join(tmpDir, "vault-token") + pathTokenFile := filepath.Join(tmpDir, "token-file") + policyName := "kv-access" + secretRenderInterval := 1 * time.Second + contextTimeout := 30 * time.Second - // Create temp dir for this test run - tmpDir, err := os.MkdirTemp("", "TestAutoAuth_SelfHealing") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) + cluster := minimal.NewTestSoloCluster(t, nil) + logger := corehelpers.NewTestLogger(t) + serverClient := cluster.Cores[0].Client // Write a policy with correct access to the secrets - serverClient.Sys().PutPolicy("kv-access", kvAccessPolicy) + err := serverClient.Sys().PutPolicy(policyName, kvAccessPolicy) + require.NoError(t, err) // Create a token without enough policy access to the kv secrets - secret, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{}) + secret, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"test-autoauth"}, + }) require.NoError(t, err) + require.NotNil(t, secret) + require.NotNil(t, secret.Auth) + require.NotEmpty(t, secret.Auth.ClientToken) + require.Len(t, secret.Auth.Policies, 2) + require.Contains(t, secret.Auth.Policies, "default") + require.Contains(t, secret.Auth.Policies, "test-autoauth") token := secret.Auth.ClientToken // Write token to vault-token file - tokenFilePath := filepath.Join(tmpDir, "vault-token") - tokenFile, err := os.Create(tokenFilePath) + tokenFile, err := os.Create(pathVaultToken) require.NoError(t, err) _, err = tokenFile.WriteString(token) require.NoError(t, err) err = tokenFile.Close() require.NoError(t, err) - defer os.Remove(tokenFilePath) - require.NoError(t, err) - - ctx, cancel := context.WithCancel(context.Background()) + // Give us some leeway of 3 errors 1 from each of: auth handler, sink server template server. 
+ errCh := make(chan error, 3) + ctx, cancel := context.WithTimeout(context.Background(), contextTimeout) // Create auth handler am, err := tokenfile.NewTokenFileAuthMethod(&auth.AuthConfig{ Logger: logger.Named("auth.method"), Config: map[string]interface{}{ - "token_file_path": filepath.Join(filepath.Join(tmpDir, "vault-token")), + "token_file_path": pathVaultToken, }, }) require.NoError(t, err) + ahConfig := &auth.AuthHandlerConfig{ Logger: logger.Named("auth.handler"), Client: serverClient, @@ -296,34 +296,20 @@ func Test_NoAutoAuthSelfHealing_BadPolicy(t *testing.T) { ExitOnError: false, } ah := auth.NewAuthHandler(ahConfig) - errCh := make(chan error) - go func() { errCh <- ah.Run(ctx, am) }() - defer func() { - select { - case <-ctx.Done(): - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } - }() // Create sink file server - sinkFilePath := filepath.Join(tmpDir, "token-file") - _, err = os.Create(sinkFilePath) - defer os.Remove(sinkFilePath) + _, err = os.Create(pathTokenFile) require.NoError(t, err) config := &sink.SinkConfig{ Logger: logger.Named("sink.file"), Config: map[string]interface{}{ - "path": sinkFilePath, + "path": pathTokenFile, }, } - fs, err := file.NewFileSink(config) if err != nil { t.Fatal(err) @@ -334,30 +320,20 @@ func Test_NoAutoAuthSelfHealing_BadPolicy(t *testing.T) { Logger: logger.Named("sink.server"), Client: serverClient, }) - go func() { errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}, ah.AuthInProgress) }() - defer func() { - select { - case <-ctx.Done(): - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } - }() // Create template server sc := template.ServerConfig{ - Logger: logging.NewVaultLogger(hclog.Trace), + Logger: logger.Named("template.server"), AgentConfig: &agentConfig.Config{ Vault: &agentConfig.Vault{ Address: serverClient.Address(), TLSSkipVerify: true, }, TemplateConfig: &agentConfig.TemplateConfig{ - StaticSecretRenderInt: time.Second * 5, + StaticSecretRenderInt: secretRenderInterval, }, // Need to crate at least one sink output so that it does not exit after rendering AutoAuth: &agentConfig.AutoAuth{ @@ -365,7 +341,7 @@ func Test_NoAutoAuthSelfHealing_BadPolicy(t *testing.T) { { Type: "file", Config: map[string]interface{}{ - "path": filepath.Join(filepath.Join(tmpDir, "kvData")), + "path": pathKVData, }, }, }, @@ -378,60 +354,67 @@ func Test_NoAutoAuthSelfHealing_BadPolicy(t *testing.T) { } templateTest := &ctconfig.TemplateConfig{ - Contents: pointerutil.StringPtr(kvDataTemplateContents), + Contents: pointerutil.StringPtr(kvDataTemplateContents), + Destination: pointerutil.StringPtr(pathKVData), } - dstFile := fmt.Sprintf("%s/%s", tmpDir, "kvData") - templateTest.Destination = pointerutil.StringPtr(dstFile) templatesToRender := []*ctconfig.TemplateConfig{templateTest} var server *template.Server server = template.NewServer(&sc) - go func() { errCh <- server.Run(ctx, ah.TemplateTokenCh, templatesToRender, ah.AuthInProgress, ah.InvalidToken) }() - defer func() { - select { - case <-ctx.Done(): - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } - }() - // Must be done at the very end so that nothing is blocking - defer cancel() - - // Trigger template render + // Trigger template render (mark the time as being earlier, based on the render interval) + preTriggerTime := time.Now().Add(-secretRenderInterval) ah.TemplateTokenCh <- token - _, err = waitForFiles(t, filepath.Join(tmpDir, "token-file"), time.Time{}) + _, err = waitForFiles(t, pathTokenFile, preTriggerTime) require.NoError(t, 
err) - tokenInSink, err := os.ReadFile(filepath.Join(tmpDir, "token-file")) + tokenInSink, err := os.ReadFile(pathTokenFile) require.NoError(t, err) - require.Equal(t, string(tokenInSink), token) + require.Equal(t, token, string(tokenInSink)) // Create new token with the correct policy access tokenSecret, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{ - Policies: []string{"kv-access"}, + Policies: []string{policyName}, }) require.NoError(t, err) + require.NotNil(t, tokenSecret) + require.NotNil(t, tokenSecret.Auth) + require.NotEmpty(t, tokenSecret.Auth.ClientToken) + require.Len(t, tokenSecret.Auth.Policies, 2) + require.Contains(t, tokenSecret.Auth.Policies, "default") + require.Contains(t, tokenSecret.Auth.Policies, policyName) newToken := tokenSecret.Auth.ClientToken // Write token to file - err = os.WriteFile(filepath.Join(tmpDir, "vault-token"), []byte(token), 0o600) + err = os.WriteFile(pathVaultToken, []byte(token), 0o600) require.NoError(t, err) // Wait for any potential *incorrect* re-triggers of auto auth - time.Sleep(time.Second * 5) + time.Sleep(secretRenderInterval * 3) // Auto auth should not have been re-triggered because of just a permission denied error // Verify that the new token has NOT been written to the token sink - tokenInSink, err = os.ReadFile(filepath.Join(tmpDir, "token-file")) + tokenInSink, err = os.ReadFile(pathTokenFile) require.NoError(t, err) - require.NotEqual(t, string(tokenInSink), newToken) - require.Equal(t, string(tokenInSink), token) + require.NotEqual(t, newToken, string(tokenInSink)) + require.Equal(t, token, string(tokenInSink)) + + cancel() + wrapUpTimeout := 5 * time.Second + for { + select { + case <-time.After(wrapUpTimeout): + t.Fatal("test timed out") + case err := <-errCh: + require.NoError(t, err) + case <-ctx.Done(): + // We can finish the test ourselves + return + } + } } func waitForFiles(t *testing.T, filePath string, prevModTime time.Time) (os.FileInfo, error) { From 1e3efed2fa0a40b78922b9e2a3171d75d262fca7 Mon Sep 17 00:00:00 2001 From: divyaac Date: Tue, 9 Apr 2024 13:40:15 -0700 Subject: [PATCH 3/4] Documentation for Self Healing Auto Auth Proxy and Agent (#26324) * Documentation for Self Healing Auto Auth Proxy and Agent' * Added or * Edited one more thing --- website/content/docs/agent-and-proxy/autoauth/index.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/content/docs/agent-and-proxy/autoauth/index.mdx b/website/content/docs/agent-and-proxy/autoauth/index.mdx index f24a2d03e8aa..3bee8a43df31 100644 --- a/website/content/docs/agent-and-proxy/autoauth/index.mdx +++ b/website/content/docs/agent-and-proxy/autoauth/index.mdx @@ -22,7 +22,8 @@ When Vault Agent or Vault Proxy are started with Auto-Auth enabled, it will atte Vault token using the configured Method. On failure, it will exponentially back off and then retry. On success, unless the auth method is configured to wrap the tokens, it will keep the resulting token renewed until renewal is no longer -allowed or fails, at which point it will attempt to reauthenticate. +allowed. If renewal fails, the token has been revoked, the token has exceeded the maximum number of uses, +or the token is an otherwise invalid value, it will attempt to reauthenticate. Every time an authentication is successful, the token is written to the configured Sinks, subject to their configuration. 
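
The documentation change above describes when Agent and Proxy re-trigger auto-auth. The behavior can be seen end to end with a Proxy configuration along the following lines; this is a minimal sketch, assuming an approle auth method and placeholder addresses and file paths, none of which are taken from this change set.

```hcl
# Illustrative sketch only: a minimal Vault Proxy config in which the API proxy
# uses the auto-auth token. Per the changelog above, if that token is revoked,
# has exceeded its number of uses, or is otherwise invalid, auto-auth is
# re-triggered. All paths, addresses, and the approle mount are placeholders.
auto_auth {
  method "approle" {
    config = {
      role_id_file_path   = "/etc/vault/role-id"
      secret_id_file_path = "/etc/vault/secret-id"
    }
  }

  sink "file" {
    config = {
      path = "/tmp/vault-token"
    }
  }
}

api_proxy {
  use_auto_auth_token = true
}

listener "tcp" {
  address     = "127.0.0.1:8100"
  tls_disable = true
}

vault {
  address = "https://127.0.0.1:8200"
}
```

With `use_auto_auth_token` enabled, requests that arrive without a client token are proxied using the auto-auth token; when that token has been revoked or exhausted, Proxy re-authenticates, whereas a bare permission-denied response (the case exercised by `Test_NoAutoAuthSelfHealing_BadPolicy` above) does not re-trigger auto-auth.
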
From 009702cae0378ac1fd4cbe72da2856822c94c10b Mon Sep 17 00:00:00 2001 From: claire bontempo <68122737+hellobontempo@users.noreply.github.com> Date: Tue, 9 Apr 2024 13:53:16 -0700 Subject: [PATCH 4/4] UI: Convert client count utils to typescript (#26262) * cleanup namespaceArrayToObject method * WIP typescript conversion * WIP typescripted destructured block * slowly making progress.... * WIP move all types to util type file, separate returns in formatByMonths * namespaceArrayToObject is working?!? * fix mirage handler not generating months when queries are after upgrade * wow, the types are actually working omg * add comments and update client-count-utils test * delete old js file * remove types from activity model * remove comment * reuse totalclients type to minimize places we add types * commit file with type changes for git diff * delete util file again * address PR feedback and move type declarations to util file * remove unused types * update tests, use client helper in dashboard clients test * remove typo * make modifications with updated combined activity response from the backend --- ui/app/components/clients/activity.ts | 50 +-- ui/app/components/clients/charts/line.ts | 7 +- .../clients/charts/vertical-bar-basic.ts | 5 +- ui/app/components/clients/page/counts.ts | 2 +- ui/app/components/clients/page/token.ts | 15 +- ui/lib/core/addon/utils/client-count-utils.js | 213 ------------- ui/lib/core/addon/utils/client-count-utils.ts | 295 ++++++++++++++++++ ui/mirage/handlers/clients.js | 6 +- ui/tests/helpers/clients.js | 184 ++++++++++- .../dashboard/client-count-card-test.js | 123 ++------ .../utils/client-count-utils-test.js | 289 ++++++++++++++--- ui/types/vault/charts/client-counts.d.ts | 14 +- ui/types/vault/models/clients/activity.d.ts | 41 +-- 13 files changed, 786 insertions(+), 458 deletions(-) delete mode 100644 ui/lib/core/addon/utils/client-count-utils.js create mode 100644 ui/lib/core/addon/utils/client-count-utils.ts diff --git a/ui/app/components/clients/activity.ts b/ui/app/components/clients/activity.ts index f2bf3fac6fe8..ea25b3b0bc38 100644 --- a/ui/app/components/clients/activity.ts +++ b/ui/app/components/clients/activity.ts @@ -10,15 +10,16 @@ import Component from '@glimmer/component'; import { isSameMonth, fromUnixTime } from 'date-fns'; import { parseAPITimestamp } from 'core/utils/date-formatters'; import { calculateAverage } from 'vault/utils/chart-helpers'; -import { filterVersionHistory } from 'core/utils/client-count-utils'; +import { filterVersionHistory, hasMountsKey, hasNamespacesKey } from 'core/utils/client-count-utils'; import type ClientsActivityModel from 'vault/models/clients/activity'; -import type { - ClientActivityNewClients, - ClientActivityMonthly, - ClientActivityResourceByKey, -} from 'vault/models/clients/activity'; import type ClientsVersionHistoryModel from 'vault/models/clients/version-history'; +import type { + ByMonthNewClients, + MountNewClients, + NamespaceByKey, + NamespaceNewClients, +} from 'core/utils/client-count-utils'; interface Args { isSecretsSyncActivated?: boolean; @@ -33,10 +34,8 @@ interface Args { export default class ClientsActivityComponent extends Component { average = ( data: - | ClientActivityMonthly[] - | (ClientActivityResourceByKey | undefined)[] - | (ClientActivityNewClients | undefined)[] - | undefined, + | (ByMonthNewClients | NamespaceNewClients | MountNewClients | undefined)[] + | (NamespaceByKey | undefined)[], key: string ) => { return calculateAverage(data, key); @@ -65,18 +64,18 @@ export default 
class ClientsActivityComponent extends Component { return activity.byMonth; } const namespaceData = activity.byMonth - .map((m) => m.namespaces_by_key[namespace as keyof typeof m.namespaces_by_key]) + ?.map((m) => m.namespaces_by_key[namespace]) .filter((d) => d !== undefined); if (!mountPath) { - return namespaceData.length === 0 ? undefined : namespaceData; + return namespaceData || []; } - const mountData = mountPath - ? namespaceData.map((namespace) => namespace?.mounts_by_key[mountPath]).filter((d) => d !== undefined) - : namespaceData; + const mountData = namespaceData + ?.map((namespace) => namespace?.mounts_by_key[mountPath]) + .filter((d) => d !== undefined); - return mountData.length === 0 ? undefined : mountData; + return mountData || []; } get filteredActivityByNamespace() { @@ -119,11 +118,13 @@ export default class ClientsActivityComponent extends Component { return filterVersionHistory(versionHistory, activity.startTime, activity.endTime); } - // (object) single month new client data with total counts + array of namespace breakdown + // (object) single month new client data with total counts and array of + // either namespaces or mounts get newClientCounts() { - if (this.isDateRange || !this.byMonthActivityData) { + if (this.isDateRange || this.byMonthActivityData.length === 0) { return null; } + return this.byMonthActivityData[0]?.new_clients; } @@ -140,13 +141,14 @@ export default class ClientsActivityComponent extends Component { // new client data for horizontal bar chart get newClientAttribution() { // new client attribution only available in a single, historical month (not a date range or current month) - if (this.isDateRange || this.isCurrentMonth) return null; + if (this.isDateRange || this.isCurrentMonth || !this.newClientCounts) return null; - if (this.args.namespace) { - return this.newClientCounts?.mounts || null; - } else { - return this.newClientCounts?.namespaces || null; - } + const newCounts = this.newClientCounts; + if (this.args.namespace && hasMountsKey(newCounts)) return newCounts?.mounts; + + if (hasNamespacesKey(newCounts)) return newCounts?.namespaces; + + return null; } get hasAttributionData() { diff --git a/ui/app/components/clients/charts/line.ts b/ui/app/components/clients/charts/line.ts index da1dda11ab9b..3c737a85e202 100644 --- a/ui/app/components/clients/charts/line.ts +++ b/ui/app/components/clients/charts/line.ts @@ -9,8 +9,9 @@ import { parseAPITimestamp } from 'core/utils/date-formatters'; import { format, isValid } from 'date-fns'; import { debug } from '@ember/debug'; -import type { Count, MonthlyChartData, Timestamp } from 'vault/vault/charts/client-counts'; import type ClientsVersionHistoryModel from 'vault/models/clients/version-history'; +import type { MonthlyChartData, Timestamp } from 'vault/vault/charts/client-counts'; +import type { TotalClients } from 'core/utils/client-count-utils'; interface Args { dataset: MonthlyChartData[]; @@ -67,7 +68,7 @@ export default class LineChart extends Component { const upgradeMessage = this.getUpgradeMessage(datum); return { x: timestamp, - y: (datum[this.yKey as keyof Count] as number) ?? null, + y: (datum[this.yKey as keyof TotalClients] as number) ?? 
null, new: this.getNewClients(datum), tooltipUpgrade: upgradeMessage, month: datum.month, @@ -123,7 +124,7 @@ export default class LineChart extends Component { } getNewClients(datum: MonthlyChartData) { if (!datum?.new_clients) return 0; - return (datum?.new_clients[this.yKey as keyof Count] as number) || 0; + return (datum?.new_clients[this.yKey as keyof TotalClients] as number) || 0; } hasValue = (count: number | null) => { diff --git a/ui/app/components/clients/charts/vertical-bar-basic.ts b/ui/app/components/clients/charts/vertical-bar-basic.ts index e81ad07a86f7..867a5d5b743e 100644 --- a/ui/app/components/clients/charts/vertical-bar-basic.ts +++ b/ui/app/components/clients/charts/vertical-bar-basic.ts @@ -9,7 +9,8 @@ import { BAR_WIDTH, formatNumbers } from 'vault/utils/chart-helpers'; import { formatNumber } from 'core/helpers/format-number'; import { parseAPITimestamp } from 'core/utils/date-formatters'; -import type { Count, MonthlyChartData } from 'vault/vault/charts/client-counts'; +import type { MonthlyChartData } from 'vault/vault/charts/client-counts'; +import type { TotalClients } from 'core/utils/client-count-utils'; interface Args { data: MonthlyChartData[]; @@ -51,7 +52,7 @@ export default class VerticalBarBasic extends Component { get chartData() { return this.args.data.map((d): ChartData => { const xValue = d.timestamp as string; - const yValue = (d[this.args.dataKey as keyof Count] as number) ?? null; + const yValue = (d[this.args.dataKey as keyof TotalClients] as number) ?? null; return { x: parseAPITimestamp(xValue, 'M/yy') as string, y: yValue, diff --git a/ui/app/components/clients/page/counts.ts b/ui/app/components/clients/page/counts.ts index 23da4c52f0f1..59bf06a5e370 100644 --- a/ui/app/components/clients/page/counts.ts +++ b/ui/app/components/clients/page/counts.ts @@ -162,7 +162,7 @@ export default class ClientsCountsPageComponent extends Component { } @action - onDateChange(dateObject: { dateType: string; monthIdx: string; year: string }) { + onDateChange(dateObject: { dateType: string; monthIdx: number; year: number }) { const { dateType, monthIdx, year } = dateObject; const { config } = this.args; const currentTimestamp = getUnixTime(timestamp.now()); diff --git a/ui/app/components/clients/page/token.ts b/ui/app/components/clients/page/token.ts index fd00b3203861..4d653ec3eff9 100644 --- a/ui/app/components/clients/page/token.ts +++ b/ui/app/components/clients/page/token.ts @@ -6,10 +6,11 @@ import ActivityComponent from '../activity'; import type { - ClientActivityNewClients, - ClientActivityMonthly, - ClientActivityResourceByKey, -} from 'vault/vault/models/clients/activity'; + ByMonthNewClients, + MountNewClients, + NamespaceByKey, + NamespaceNewClients, +} from 'core/utils/client-count-utils'; export default class ClientsTokenPageComponent extends ActivityComponent { legend = [ @@ -19,10 +20,8 @@ export default class ClientsTokenPageComponent extends ActivityComponent { calculateClientAverages( dataset: - | ClientActivityMonthly[] - | (ClientActivityResourceByKey | undefined)[] - | (ClientActivityNewClients | undefined)[] - | undefined + | (NamespaceByKey | undefined)[] + | (ByMonthNewClients | NamespaceNewClients | MountNewClients | undefined)[] ) { return ['entity_clients', 'non_entity_clients'].reduce((count, key) => { const average = this.average(dataset, key); diff --git a/ui/lib/core/addon/utils/client-count-utils.js b/ui/lib/core/addon/utils/client-count-utils.js deleted file mode 100644 index 39fd01aa6c2d..000000000000 --- 
a/ui/lib/core/addon/utils/client-count-utils.js +++ /dev/null @@ -1,213 +0,0 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: BUSL-1.1 - */ - -import { parseAPITimestamp } from 'core/utils/date-formatters'; -import { compareAsc, getUnixTime, isWithinInterval } from 'date-fns'; - -// add new types here -export const CLIENT_TYPES = [ - 'acme_clients', - 'clients', // summation of total clients - 'entity_clients', - 'non_entity_clients', - 'secret_syncs', -]; - -// returns array of VersionHistoryModels for noteworthy upgrades: 1.9, 1.10 -// that occurred between timestamps (i.e. queried activity data) -export const filterVersionHistory = (versionHistory, start, end) => { - if (versionHistory) { - const upgrades = versionHistory.reduce((array, upgradeData) => { - const includesVersion = (v) => - // only add first match, disregard subsequent patch releases of the same version - upgradeData.version.match(v) && !array.some((d) => d.version.match(v)); - - ['1.9', '1.10'].forEach((v) => { - if (includesVersion(v)) array.push(upgradeData); - }); - - return array; - }, []); - - // if there are noteworthy upgrades, only return those during queried date range - if (upgrades.length) { - const startDate = parseAPITimestamp(start); - const endDate = parseAPITimestamp(end); - return upgrades.filter(({ timestampInstalled }) => { - const upgradeDate = parseAPITimestamp(timestampInstalled); - return isWithinInterval(upgradeDate, { start: startDate, end: endDate }); - }); - } - } - return []; -}; - -export const formatDateObject = (dateObj, isEnd) => { - if (dateObj) { - const { year, monthIdx } = dateObj; - // day=0 for Date.UTC() returns the last day of the month before - // increase monthIdx by one to get last day of queried month - const utc = isEnd ? Date.UTC(year, monthIdx + 1, 0) : Date.UTC(year, monthIdx, 1); - return getUnixTime(utc); - } -}; - -export const formatByMonths = (monthsArray) => { - // the monthsArray will always include a timestamp of the month and either new/total client data or counts = null - if (!Array.isArray(monthsArray)) return monthsArray; - - const sortedPayload = sortMonthsByTimestamp(monthsArray); - return sortedPayload?.map((m) => { - const month = parseAPITimestamp(m.timestamp, 'M/yy'); - const totalClientsByNamespace = formatByNamespace(m.namespaces); - const newClientsByNamespace = formatByNamespace(m.new_clients?.namespaces); - return { - month, - timestamp: m.timestamp, - ...destructureClientCounts(m?.counts), - namespaces: formatByNamespace(m.namespaces) || [], - namespaces_by_key: namespaceArrayToObject( - totalClientsByNamespace, - newClientsByNamespace, - month, - m.timestamp - ), - new_clients: { - month, - timestamp: m.timestamp, - ...destructureClientCounts(m?.new_clients?.counts), - namespaces: formatByNamespace(m.new_clients?.namespaces) || [], - }, - }; - }); -}; - -export const formatByNamespace = (namespaceArray) => { - if (!Array.isArray(namespaceArray)) return namespaceArray; - return namespaceArray?.map((ns) => { - // i.e. 'namespace_path' is an empty string for 'root', so use namespace_id - const label = ns.namespace_path === '' ? 
ns.namespace_id : ns.namespace_path; - // data prior to adding mount granularity will still have a mounts key, - // but with the value: "no mount accessor (pre-1.10 upgrade?)" (ref: vault/activity_log_util_common.go) - // transform to an empty array for type consistency - let mounts = []; - if (Array.isArray(ns.mounts)) { - mounts = ns.mounts.map((m) => ({ label: m['mount_path'], ...destructureClientCounts(m?.counts) })); - } - return { - label, - ...destructureClientCounts(ns.counts), - mounts, - }; - }); -}; - -// In 1.10 'distinct_entities' changed to 'entity_clients' and 'non_entity_tokens' to 'non_entity_clients' -// these deprecated keys still exist on the response, so only return relevant keys here -// when querying historical data the response will always contain the latest client type keys because the activity log is -// constructed based on the version of Vault the user is on (key values will be 0) -export const destructureClientCounts = (verboseObject) => { - if (!verboseObject) return; - return CLIENT_TYPES.reduce((newObj, clientType) => { - newObj[clientType] = verboseObject[clientType]; - return newObj; - }, {}); -}; - -export const sortMonthsByTimestamp = (monthsArray) => { - const sortedPayload = [...monthsArray]; - return sortedPayload.sort((a, b) => - compareAsc(parseAPITimestamp(a.timestamp), parseAPITimestamp(b.timestamp)) - ); -}; - -export const namespaceArrayToObject = (totalClientsByNamespace, newClientsByNamespace, month, timestamp) => { - if (!totalClientsByNamespace) return {}; // return if no data for that month - // all 'new_client' data resides within a separate key of each month (see data structure below) - // FIRST: iterate and nest respective 'new_clients' data within each namespace and mount object - // note: this is happening within the month object - const nestNewClientsWithinNamespace = totalClientsByNamespace?.map((ns) => { - const newNamespaceCounts = newClientsByNamespace?.find((n) => n.label === ns.label); - if (newNamespaceCounts) { - const newClientsByMount = [...newNamespaceCounts.mounts]; - const nestNewClientsWithinMounts = ns.mounts?.map((mount) => { - const new_clients = newClientsByMount?.find((m) => m.label === mount.label) || {}; - return { - ...mount, - new_clients, - }; - }); - return { - ...ns, - new_clients: { - label: ns.label, - ...destructureClientCounts(newNamespaceCounts), - mounts: newClientsByMount, - }, - mounts: [...nestNewClientsWithinMounts], - }; - } - return { - ...ns, - new_clients: {}, - }; - }); - // SECOND: create a new object (namespace_by_key) in which each namespace label is a key - const namespaces_by_key = {}; - nestNewClientsWithinNamespace?.forEach((namespaceObject) => { - // THIRD: make another object within the namespace where each mount label is a key - const mounts_by_key = {}; - namespaceObject.mounts.forEach((mountObject) => { - mounts_by_key[mountObject.label] = { - month, - timestamp, - ...mountObject, - new_clients: { month, ...mountObject.new_clients }, - }; - }); - - const { label, new_clients } = namespaceObject; - namespaces_by_key[label] = { - month, - timestamp, - ...destructureClientCounts(namespaceObject), - new_clients: { month, ...new_clients }, - mounts_by_key, - }; - }); - return namespaces_by_key; - /* - structure of object returned - namespace_by_key: { - "namespace_label": { - month: "3/22", - clients: 32, - entity_clients: 16, - non_entity_clients: 16, - new_clients: { - month: "3/22", - clients: 5, - entity_clients: 2, - non_entity_clients: 3, - mounts: [...array of this namespace's 
mounts and their new client counts], - }, - mounts_by_key: { - "mount_label": { - month: "3/22", - clients: 3, - entity_clients: 2, - non_entity_clients: 1, - new_clients: { - month: "3/22", - clients: 5, - entity_clients: 2, - non_entity_clients: 3, - }, - }, - }, - }, - }; - */ -}; diff --git a/ui/lib/core/addon/utils/client-count-utils.ts b/ui/lib/core/addon/utils/client-count-utils.ts new file mode 100644 index 000000000000..fe5dc06908ba --- /dev/null +++ b/ui/lib/core/addon/utils/client-count-utils.ts @@ -0,0 +1,295 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { parseAPITimestamp } from 'core/utils/date-formatters'; +import { compareAsc, getUnixTime, isWithinInterval } from 'date-fns'; + +import type ClientsVersionHistoryModel from 'vault/vault/models/clients/version-history'; + +/* +The client count utils are responsible for serializing the sys/internal/counters/activity API response +The initial API response shape and serialized types are defined below. + +To help visualize there are sample responses in ui/tests/helpers/clients.js +*/ + +// add new types here +export const CLIENT_TYPES = [ + 'acme_clients', + 'clients', // summation of total clients + 'entity_clients', + 'non_entity_clients', + 'secret_syncs', +] as const; + +type ClientTypes = (typeof CLIENT_TYPES)[number]; + +// returns array of VersionHistoryModels for noteworthy upgrades: 1.9, 1.10 +// that occurred between timestamps (i.e. queried activity data) +export const filterVersionHistory = ( + versionHistory: ClientsVersionHistoryModel[], + start: string, + end: string +) => { + if (versionHistory) { + const upgrades = versionHistory.reduce((array: ClientsVersionHistoryModel[], upgradeData) => { + const includesVersion = (v: string) => + // only add first match, disregard subsequent patch releases of the same version + upgradeData.version.match(v) && !array.some((d: ClientsVersionHistoryModel) => d.version.match(v)); + + ['1.9', '1.10'].forEach((v) => { + if (includesVersion(v)) array.push(upgradeData); + }); + + return array; + }, []); + + // if there are noteworthy upgrades, only return those during queried date range + if (upgrades.length) { + const startDate = parseAPITimestamp(start) as Date; + const endDate = parseAPITimestamp(end) as Date; + return upgrades.filter(({ timestampInstalled }) => { + const upgradeDate = parseAPITimestamp(timestampInstalled) as Date; + return isWithinInterval(upgradeDate, { start: startDate, end: endDate }); + }); + } + } + return []; +}; + +export const formatDateObject = (dateObj: { monthIdx: number; year: number }, isEnd: boolean) => { + const { year, monthIdx } = dateObj; + // day=0 for Date.UTC() returns the last day of the month before + // increase monthIdx by one to get last day of queried month + const utc = isEnd ? 
Date.UTC(year, monthIdx + 1, 0) : Date.UTC(year, monthIdx, 1); + return getUnixTime(utc); +}; + +export const formatByMonths = (monthsArray: ActivityMonthBlock[] | EmptyActivityMonthBlock[]) => { + const sortedPayload = sortMonthsByTimestamp(monthsArray); + return sortedPayload?.map((m) => { + const month = parseAPITimestamp(m.timestamp, 'M/yy') as string; + const { timestamp } = m; + // counts are null if there is no monthly data + if (m.counts) { + const totalClientsByNamespace = formatByNamespace(m.namespaces); + const newClientsByNamespace = formatByNamespace(m.new_clients?.namespaces); + return { + month, + timestamp, + ...destructureClientCounts(m.counts), + namespaces: formatByNamespace(m.namespaces) || [], + namespaces_by_key: namespaceArrayToObject( + totalClientsByNamespace, + newClientsByNamespace, + month, + m.timestamp + ), + new_clients: { + month, + timestamp, + ...destructureClientCounts(m?.new_clients?.counts), + namespaces: formatByNamespace(m.new_clients?.namespaces) || [], + }, + }; + } + // empty month + return { + month, + timestamp, + namespaces: [], + namespaces_by_key: {}, + new_clients: { month, timestamp, namespaces: [] }, + }; + }); +}; + +export const formatByNamespace = (namespaceArray: NamespaceObject[]) => { + return namespaceArray.map((ns) => { + // i.e. 'namespace_path' is an empty string for 'root', so use namespace_id + const label = ns.namespace_path === '' ? ns.namespace_id : ns.namespace_path; + // data prior to adding mount granularity will still have a mounts array, + // but the mount_path value will be "no mount accessor (pre-1.10 upgrade?)" (ref: vault/activity_log_util_common.go) + // transform to an empty array for type consistency + let mounts: MountClients[] | [] = []; + if (Array.isArray(ns.mounts)) { + mounts = ns.mounts.map((m) => ({ label: m.mount_path, ...destructureClientCounts(m.counts) })); + } + return { + label, + ...destructureClientCounts(ns.counts), + mounts, + }; + }); +}; + +// In 1.10 'distinct_entities' changed to 'entity_clients' and 'non_entity_tokens' to 'non_entity_clients' +// these deprecated keys still exist on the response, so only return relevant keys here +// when querying historical data the response will always contain the latest client type keys because the activity log is +// constructed based on the version of Vault the user is on (key values will be 0) +export const destructureClientCounts = (verboseObject: Counts | ByNamespaceClients) => { + return CLIENT_TYPES.reduce((newObj: Record, clientType: ClientTypes) => { + newObj[clientType] = verboseObject[clientType]; + return newObj; + }, {} as Record); +}; + +export const sortMonthsByTimestamp = (monthsArray: ActivityMonthBlock[] | EmptyActivityMonthBlock[]) => { + const sortedPayload = [...monthsArray]; + return sortedPayload.sort((a, b) => + compareAsc(parseAPITimestamp(a.timestamp) as Date, parseAPITimestamp(b.timestamp) as Date) + ); +}; + +export const namespaceArrayToObject = ( + monthTotals: ByNamespaceClients[], + // technically this arg (monthNew) is the same type as above, just nested inside monthly new clients + monthNew: ByMonthClients['new_clients']['namespaces'], + month: string, + timestamp: string +) => { + // namespaces_by_key is used to filter monthly activity data by namespace + // it's an object in each month data block where the keys are namespace paths + // and values include new and total client counts for that namespace in that month + const namespaces_by_key = monthTotals.reduce((nsObject: { [key: string]: NamespaceByKey }, ns) => { + 
const newNsClients = monthNew?.find((n) => n.label === ns.label); + if (newNsClients) { + // mounts_by_key is is used to filter further in a namespace and get monthly activity by mount + // it's an object inside the namespace block where the keys are mount paths + // and the values include new and total client counts for that mount in that month + const mounts_by_key = ns.mounts.reduce((mountObj: { [key: string]: MountByKey }, mount) => { + const newMountClients = newNsClients.mounts.find((m) => m.label === mount.label); + + if (newMountClients) { + mountObj[mount.label] = { + ...mount, + timestamp, + month, + new_clients: { month, ...newMountClients }, + }; + } + return mountObj; + }, {} as { [key: string]: MountByKey }); + + nsObject[ns.label] = { + ...destructureClientCounts(ns), + timestamp, + month, + new_clients: { month, ...newNsClients }, + mounts_by_key, + }; + } + return nsObject; + }, {}); + + return namespaces_by_key; +}; + +// type guards for conditionals +export function hasMountsKey( + obj: ByMonthNewClients | NamespaceNewClients | MountNewClients +): obj is NamespaceNewClients { + return 'mounts' in obj; +} + +export function hasNamespacesKey( + obj: ByMonthNewClients | NamespaceNewClients | MountNewClients +): obj is ByMonthNewClients { + return 'namespaces' in obj; +} + +// TYPES RETURNED BY UTILS (serialized) + +export interface TotalClients { + clients: number; + entity_clients: number; + non_entity_clients: number; + secret_syncs: number; + acme_clients: number; +} + +export interface ByNamespaceClients extends TotalClients { + label: string; + mounts: MountClients[]; +} + +export interface MountClients extends TotalClients { + label: string; +} + +export interface ByMonthClients extends TotalClients { + month: string; + timestamp: string; + namespaces: ByNamespaceClients[]; + namespaces_by_key: { [key: string]: NamespaceByKey }; + new_clients: ByMonthNewClients; +} +export interface ByMonthNewClients extends TotalClients { + month: string; + timestamp: string; + namespaces: ByNamespaceClients[]; +} + +export interface NamespaceByKey extends TotalClients { + month: string; + timestamp: string; + mounts_by_key: { [key: string]: MountByKey }; + new_clients: NamespaceNewClients; +} + +export interface NamespaceNewClients extends TotalClients { + month: string; + label: string; + mounts: MountClients[]; +} + +export interface MountByKey extends TotalClients { + month: string; + timestamp: string; + label: string; + new_clients: MountNewClients; +} + +export interface MountNewClients extends TotalClients { + month: string; + label: string; +} + +// API RESPONSE SHAPE (prior to serialization) + +export interface NamespaceObject { + namespace_id: string; + namespace_path: string; + counts: Counts; + mounts: { mount_path: string; counts: Counts }[]; +} + +export interface ActivityMonthBlock { + timestamp: string; // YYYY-MM-01T00:00:00Z (always the first day of the month) + counts: Counts; + namespaces: NamespaceObject[]; + new_clients: { + counts: Counts; + namespaces: NamespaceObject[]; + timestamp: string; + }; +} + +export interface EmptyActivityMonthBlock { + timestamp: string; // YYYY-MM-01T00:00:00Z (always the first day of the month) + counts: null; + namespaces: null; + new_clients: null; +} + +export interface Counts { + acme_clients: number; + clients: number; + distinct_entities: number; + entity_clients: number; + non_entity_clients: number; + non_entity_tokens: number; + secret_syncs: number; +} diff --git a/ui/mirage/handlers/clients.js 
b/ui/mirage/handlers/clients.js index 6293542bd1a2..2e9d27e9822e 100644 --- a/ui/mirage/handlers/clients.js +++ b/ui/mirage/handlers/clients.js @@ -110,8 +110,10 @@ function generateMonths(startDate, endDate, namespaces) { const numberOfMonths = differenceInCalendarMonths(endDateObject, startDateObject) + 1; const months = []; - // only generate monthly block if queried dates span an upgrade - if (isWithinInterval(UPGRADE_DATE, { start: startDateObject, end: endDateObject })) { + // only generate monthly block if queried dates span or follow upgrade to 1.10 + const upgradeWithin = isWithinInterval(UPGRADE_DATE, { start: startDateObject, end: endDateObject }); + const upgradeAfter = isAfter(startDateObject, UPGRADE_DATE); + if (upgradeWithin || upgradeAfter) { for (let i = 0; i < numberOfMonths; i++) { const month = addMonths(startOfMonth(startDateObject), i); const hasNoData = isBefore(month, UPGRADE_DATE) && !isSameMonth(month, UPGRADE_DATE); diff --git a/ui/tests/helpers/clients.js b/ui/tests/helpers/clients.js index 38dea95fd78e..20874b41cd40 100644 --- a/ui/tests/helpers/clients.js +++ b/ui/tests/helpers/clients.js @@ -193,7 +193,7 @@ export const ACTIVITY_RESPONSE_STUB = { }, { namespace_id: '81ry61', - namespace_path: 'ns/1', + namespace_path: 'ns1', counts: { distinct_entities: 783, entity_clients: 783, @@ -315,7 +315,7 @@ export const ACTIVITY_RESPONSE_STUB = { }, { namespace_id: '81ry61', - namespace_path: 'ns/1', + namespace_path: 'ns1', counts: { distinct_entities: 50, entity_clients: 50, @@ -378,7 +378,7 @@ export const ACTIVITY_RESPONSE_STUB = { namespaces: [ { namespace_id: '81ry61', - namespace_path: 'ns/1', + namespace_path: 'ns1', counts: { distinct_entities: 30, entity_clients: 30, @@ -493,6 +493,174 @@ export const ACTIVITY_RESPONSE_STUB = { }, }; +// combined activity data before and after 1.10 upgrade when Vault added mount attribution +export const MIXED_ACTIVITY_RESPONSE_STUB = { + start_time: '2024-03-01T00:00:00Z', + end_time: '2024-04-30T23:59:59Z', + total: { + acme_clients: 0, + clients: 3, + distinct_entities: 3, + entity_clients: 3, + non_entity_clients: 0, + non_entity_tokens: 0, + secret_syncs: 0, + }, + by_namespace: [ + { + counts: { + acme_clients: 0, + clients: 3, + distinct_entities: 3, + entity_clients: 3, + non_entity_clients: 0, + non_entity_tokens: 0, + secret_syncs: 0, + }, + mounts: [ + { + counts: { + acme_clients: 0, + clients: 2, + distinct_entities: 2, + entity_clients: 2, + non_entity_clients: 0, + non_entity_tokens: 0, + secret_syncs: 0, + }, + mount_path: 'no mount accessor (pre-1.10 upgrade?)', + }, + { + counts: { + acme_clients: 0, + clients: 1, + distinct_entities: 1, + entity_clients: 1, + non_entity_clients: 0, + non_entity_tokens: 0, + secret_syncs: 0, + }, + mount_path: 'auth/u/', + }, + ], + namespace_id: 'root', + namespace_path: '', + }, + ], + months: [ + { + counts: null, + namespaces: null, + new_clients: null, + timestamp: '2024-03-01T00:00:00Z', + }, + { + counts: { + acme_clients: 0, + clients: 3, + distinct_entities: 0, + entity_clients: 3, + non_entity_clients: 0, + non_entity_tokens: 0, + secret_syncs: 0, + }, + namespaces: [ + { + counts: { + acme_clients: 0, + clients: 3, + distinct_entities: 0, + entity_clients: 3, + non_entity_clients: 0, + non_entity_tokens: 0, + secret_syncs: 0, + }, + mounts: [ + { + counts: { + acme_clients: 0, + clients: 2, + distinct_entities: 0, + entity_clients: 2, + non_entity_clients: 0, + non_entity_tokens: 0, + secret_syncs: 0, + }, + mount_path: 'no mount accessor (pre-1.10 upgrade?)', + 
}, + { + counts: { + acme_clients: 0, + clients: 1, + distinct_entities: 0, + entity_clients: 1, + non_entity_clients: 0, + non_entity_tokens: 0, + secret_syncs: 0, + }, + mount_path: 'auth/u/', + }, + ], + namespace_id: 'root', + namespace_path: '', + }, + ], + new_clients: { + counts: { + acme_clients: 0, + clients: 3, + distinct_entities: 0, + entity_clients: 3, + non_entity_clients: 0, + non_entity_tokens: 0, + secret_syncs: 0, + }, + namespaces: [ + { + counts: { + acme_clients: 0, + clients: 3, + distinct_entities: 0, + entity_clients: 3, + non_entity_clients: 0, + non_entity_tokens: 0, + secret_syncs: 0, + }, + mounts: [ + { + counts: { + acme_clients: 0, + clients: 2, + distinct_entities: 0, + entity_clients: 2, + non_entity_clients: 0, + non_entity_tokens: 0, + secret_syncs: 0, + }, + mount_path: 'no mount accessor (pre-1.10 upgrade?)', + }, + { + counts: { + acme_clients: 0, + clients: 1, + distinct_entities: 0, + entity_clients: 1, + non_entity_clients: 0, + non_entity_tokens: 0, + secret_syncs: 0, + }, + mount_path: 'auth/u/', + }, + ], + namespace_id: 'root', + namespace_path: '', + }, + ], + }, + timestamp: '2024-04-01T00:00:00Z', + }, + ], +}; // format returned by model hook in routes/vault/cluster/clients.ts export const VERSION_HISTORY = [ { @@ -560,7 +728,7 @@ export const SERIALIZED_ACTIVITY_RESPONSE = { ], }, { - label: 'ns/1', + label: 'ns1', clients: 2376, entity_clients: 783, non_entity_clients: 1193, @@ -649,7 +817,7 @@ export const SERIALIZED_ACTIVITY_RESPONSE = { ], }, { - label: 'ns/1', + label: 'ns1', clients: 3085, entity_clients: 50, non_entity_clients: 140, @@ -787,7 +955,7 @@ export const SERIALIZED_ACTIVITY_RESPONSE = { }, }, }, - 'ns/1': { + ns1: { month: '9/23', timestamp: '2023-09-01T00:00:00Z', clients: 3085, @@ -797,7 +965,7 @@ export const SERIALIZED_ACTIVITY_RESPONSE = { acme_clients: 125, new_clients: { month: '9/23', - label: 'ns/1', + label: 'ns1', clients: 222, entity_clients: 30, non_entity_clients: 62, @@ -901,7 +1069,7 @@ export const SERIALIZED_ACTIVITY_RESPONSE = { acme_clients: 50, namespaces: [ { - label: 'ns/1', + label: 'ns1', clients: 222, entity_clients: 30, non_entity_clients: 62, diff --git a/ui/tests/integration/components/dashboard/client-count-card-test.js b/ui/tests/integration/components/dashboard/client-count-card-test.js index 8bed582d63c0..936295c33251 100644 --- a/ui/tests/integration/components/dashboard/client-count-card-test.js +++ b/ui/tests/integration/components/dashboard/client-count-card-test.js @@ -8,59 +8,37 @@ import { setupRenderingTest } from 'vault/tests/helpers'; import { render, click } from '@ember/test-helpers'; import { hbs } from 'ember-cli-htmlbars'; import { setupMirage } from 'ember-cli-mirage/test-support'; +import sinon from 'sinon'; +import { LICENSE_START, STATIC_NOW } from 'vault/mirage/handlers/clients'; import timestamp from 'core/utils/timestamp'; -import { parseAPITimestamp } from 'core/utils/date-formatters'; +import { ACTIVITY_RESPONSE_STUB } from 'vault/tests/helpers/clients'; module('Integration | Component | dashboard/client-count-card', function (hooks) { setupRenderingTest(hooks); setupMirage(hooks); + hooks.before(function () { + sinon.stub(timestamp, 'now').callsFake(() => STATIC_NOW); + }); + hooks.beforeEach(function () { this.license = { - startTime: '2018-04-03T14:15:30', + startTime: LICENSE_START.toISOString(), }; }); + hooks.after(function () { + timestamp.now.restore(); + }); + test('it should display client count information', async function (assert) { + assert.expect(9); 
this.server.get('sys/internal/counters/activity', () => { + // this assertion should be hit twice, once initially and then again clicking 'refresh' + assert.true(true, 'makes request to sys/internal/counters/activity'); return { request_id: 'some-activity-id', - data: { - months: [ - { - timestamp: '2023-08-01T00:00:00-07:00', - counts: {}, - namespaces: [ - { - namespace_id: 'root', - namespace_path: '', - counts: {}, - mounts: [{ mount_path: 'auth/up2/', counts: {} }], - }, - ], - new_clients: { - counts: { - clients: 12, - }, - namespaces: [ - { - namespace_id: 'root', - namespace_path: '', - counts: { - clients: 12, - }, - mounts: [{ mount_path: 'auth/up2/', counts: {} }], - }, - ], - }, - }, - ], - total: { - clients: 300417, - entity_clients: 73150, - non_entity_clients: 227267, - }, - }, + data: ACTIVITY_RESPONSE_STUB, }; }); @@ -69,74 +47,15 @@ module('Integration | Component | dashboard/client-count-card', function (hooks) assert.dom('[data-test-stat-text="total-clients"] .stat-label').hasText('Total'); assert .dom('[data-test-stat-text="total-clients"] .stat-text') - .hasText( - `The number of clients in this billing period (Apr 2018 - ${parseAPITimestamp( - timestamp.now().toISOString(), - 'MMM yyyy' - )}).` - ); - assert.dom('[data-test-stat-text="total-clients"] .stat-value').hasText('300,417'); + .hasText('The number of clients in this billing period (Jul 2023 - Jan 2024).'); + assert.dom('[data-test-stat-text="total-clients"] .stat-value').hasText('7,805'); assert.dom('[data-test-stat-text="new-clients"] .stat-label').hasText('New'); assert .dom('[data-test-stat-text="new-clients"] .stat-text') .hasText('The number of clients new to Vault in the current month.'); - assert.dom('[data-test-stat-text="new-clients"] .stat-value').hasText('12'); - this.server.get('sys/internal/counters/activity', () => { - return { - request_id: 'some-activity-id', - data: { - months: [ - { - timestamp: '2023-09-01T00:00:00-07:00', - counts: {}, - namespaces: [ - { - namespace_id: 'root', - namespace_path: '', - counts: {}, - mounts: [{ mount_path: 'auth/up2/', counts: {} }], - }, - ], - new_clients: { - counts: { - clients: 5, - }, - namespaces: [ - { - namespace_id: 'root', - namespace_path: '', - counts: { - clients: 12, - }, - mounts: [{ mount_path: 'auth/up2/', counts: {} }], - }, - ], - }, - }, - ], - total: { - clients: 120, - entity_clients: 100, - non_entity_clients: 100, - }, - }, - }; - }); + assert.dom('[data-test-stat-text="new-clients"] .stat-value').hasText('336'); + + // fires second request to /activity await click('[data-test-refresh]'); - assert.dom('[data-test-stat-text="total-clients"] .stat-label').hasText('Total'); - assert - .dom('[data-test-stat-text="total-clients"] .stat-text') - .hasText( - `The number of clients in this billing period (Apr 2018 - ${parseAPITimestamp( - timestamp.now().toISOString(), - 'MMM yyyy' - )}).` - ); - assert.dom('[data-test-stat-text="total-clients"] .stat-value').hasText('120'); - assert.dom('[data-test-stat-text="new-clients"] .stat-label').hasText('New'); - assert - .dom('[data-test-stat-text="new-clients"] .stat-text') - .hasText('The number of clients new to Vault in the current month.'); - assert.dom('[data-test-stat-text="new-clients"] .stat-value').hasText('5'); }); }); diff --git a/ui/tests/integration/utils/client-count-utils-test.js b/ui/tests/integration/utils/client-count-utils-test.js index 907f0be10f16..710adddb24d5 100644 --- a/ui/tests/integration/utils/client-count-utils-test.js +++ 
b/ui/tests/integration/utils/client-count-utils-test.js @@ -16,6 +16,7 @@ import { import { LICENSE_START } from 'vault/mirage/handlers/clients'; import { ACTIVITY_RESPONSE_STUB as RESPONSE, + MIXED_ACTIVITY_RESPONSE_STUB as MIXED_RESPONSE, VERSION_HISTORY, SERIALIZED_ACTIVITY_RESPONSE, } from 'vault/tests/helpers/clients'; @@ -29,7 +30,7 @@ in a serializer test for easier debugging module('Integration | Util | client count utils', function (hooks) { setupTest(hooks); - test('filterVersionHistory: returns version data for relevant upgrades that occurred during date range', async function (assert) { + test('filterVersionHistory: it returns version data for relevant upgrades that occurred during date range', async function (assert) { assert.expect(2); // LICENSE_START is '2023-07-02T00:00:00Z' const original = [...VERSION_HISTORY]; @@ -56,8 +57,8 @@ module('Integration | Util | client count utils', function (hooks) { assert.propEqual(VERSION_HISTORY, original, 'it does not modify original array'); }); - test('formatByMonths: formats the months array', async function (assert) { - assert.expect(4); + test('formatByMonths: it formats the months array', async function (assert) { + assert.expect(5); const original = [...RESPONSE.months]; const [formattedNoData, formattedWithActivity] = formatByMonths(RESPONSE.months); @@ -79,9 +80,10 @@ module('Integration | Util | client count utils', function (hooks) { 'it formats new_clients block for months with data' ); assert.propEqual(RESPONSE.months, original, 'it does not modify original months array'); + assert.propEqual(formatByMonths([]), [], 'it returns an empty array if the months key is empty'); }); - test('formatByNamespace: formats namespace array with mounts', async function (assert) { + test('formatByNamespace: it formats namespace array with mounts', async function (assert) { assert.expect(3); const original = [...RESPONSE.by_namespace]; const [formattedRoot, formattedNs1] = formatByNamespace(RESPONSE.by_namespace); @@ -92,8 +94,62 @@ module('Integration | Util | client count utils', function (hooks) { assert.propEqual(RESPONSE.by_namespace, original, 'it does not modify original by_namespace array'); }); - test('formatByNamespace: formats namespace array with no mounts (activity log data < 1.10)', async function (assert) { - assert.expect(1); + test('destructureClientCounts: it returns relevant key names when both old and new keys exist', async function (assert) { + assert.expect(2); + const original = { ...RESPONSE.total }; + const expected = { + entity_clients: 1816, + non_entity_clients: 3117, + secret_syncs: 2672, + acme_clients: 200, + clients: 7805, + }; + assert.propEqual(destructureClientCounts(RESPONSE.total), expected); + assert.propEqual(RESPONSE.total, original, 'it does not modify original object'); + }); + + test('sortMonthsByTimestamp: sorts timestamps chronologically, oldest to most recent', async function (assert) { + assert.expect(2); + // API returns them in order so this test is extra extra + const unOrdered = [RESPONSE.months[1], RESPONSE.months[0]]; // mixup order + const original = [...RESPONSE.months]; + const expected = RESPONSE.months; + assert.propEqual(sortMonthsByTimestamp(unOrdered), expected); + assert.propEqual(RESPONSE.months, original, 'it does not modify original array'); + }); + + test('namespaceArrayToObject: it returns namespaces_by_key and mounts_by_key', async function (assert) { + assert.expect(5); + + // month at 0-index has no data so use second month in array, empty month format covered by 
formatByMonths test above + const original = { ...RESPONSE.months[1] }; + const expectedObject = SERIALIZED_ACTIVITY_RESPONSE.by_month[1].namespaces_by_key; + const formattedTotal = formatByNamespace(RESPONSE.months[1].namespaces); + + const testObject = namespaceArrayToObject( + formattedTotal, + formatByNamespace(RESPONSE.months[1].new_clients.namespaces), + '9/23', + '2023-09-01T00:00:00Z' + ); + + const { root } = testObject; + const { root: expectedRoot } = expectedObject; + assert.propEqual(root.new_clients, expectedRoot.new_clients, 'it formats namespaces new_clients'); + assert.propEqual(root.mounts_by_key, expectedRoot.mounts_by_key, 'it formats namespaces mounts_by_key'); + assert.propContains(root, expectedRoot, 'namespace has correct keys'); + + assert.propEqual( + namespaceArrayToObject(formattedTotal, formatByNamespace([]), '9/23', '2023-09-01T00:00:00Z'), + {}, + 'returns an empty object when there are no new clients ' + ); + assert.propEqual(RESPONSE.months[1], original, 'it does not modify original month data'); + }); + + // TESTS FOR COMBINED ACTIVITY DATA - no mount attribution < 1.10 + test('it formats the namespaces array with no mount attribution (activity log data < 1.10)', async function (assert) { + assert.expect(2); const noMounts = [ { namespace_id: 'root', @@ -107,7 +163,20 @@ module('Integration | Util | client count utils', function (hooks) { acme_clients: 0, clients: 30, }, - mounts: 'no mount accessor (pre-1.10 upgrade?)', + mounts: [ + { + counts: { + distinct_entities: 10, + entity_clients: 10, + non_entity_tokens: 20, + non_entity_clients: 20, + secret_syncs: 0, + acme_clients: 0, + clients: 30, + }, + mount_path: 'no mount accessor (pre-1.10 upgrade?)', + }, + ], }, ]; const expected = [ @@ -116,61 +185,183 @@ module('Integration | Util | client count utils', function (hooks) { clients: 30, entity_clients: 10, label: 'root', - mounts: [], + mounts: [ + { + acme_clients: 0, + clients: 30, + entity_clients: 10, + label: 'no mount accessor (pre-1.10 upgrade?)', + non_entity_clients: 20, + secret_syncs: 0, + }, + ], non_entity_clients: 20, secret_syncs: 0, }, ]; assert.propEqual(formatByNamespace(noMounts), expected, 'it formats namespace without mounts'); + assert.propEqual(formatByNamespace([]), [], 'it returns an empty array if the by_namespace key is empty'); }); - test('destructureClientCounts: homogenizes key names when both old and new keys exist, or just old key names', async function (assert) { - assert.expect(2); - const original = { ...RESPONSE.total }; - const expected = { - entity_clients: 1816, - non_entity_clients: 3117, - secret_syncs: 2672, - acme_clients: 200, - clients: 7805, - }; - assert.propEqual(destructureClientCounts(RESPONSE.total), expected); - assert.propEqual(RESPONSE.total, original, 'it does not modify original object'); - }); - - test('sortMonthsByTimestamp: sorts timestamps chronologically, oldest to most recent', async function (assert) { - assert.expect(2); - // API returns them in order so this test is extra extra - const unOrdered = [RESPONSE.months[1], RESPONSE.months[0]]; // mixup order - const original = [...RESPONSE.months]; - const expected = RESPONSE.months; - assert.propEqual(sortMonthsByTimestamp(unOrdered), expected); - assert.propEqual(RESPONSE.months, original, 'it does not modify original array'); - }); - - test('namespaceArrayToObject: it generates namespaces_by_key without modifying original', async function (assert) { + test('it formats the months array with mixed activity data', async function (assert) { 
assert.expect(3); - // month at 0-index has no data so use second month in array - const { namespaces, new_clients } = RESPONSE.months[1]; - const original = { ...RESPONSE.months[1] }; - const byNamespaceKeyObject = namespaceArrayToObject( - formatByNamespace(namespaces), - formatByNamespace(new_clients.namespaces), - '9/23', - '2023-09-01T00:00:00Z' + const [, formattedWithActivity] = formatByMonths(MIXED_RESPONSE.months); + // mirage isn't set up to generate mixed data, so hardcoding the expected responses here + assert.propEqual( + formattedWithActivity.namespaces, + [ + { + acme_clients: 0, + clients: 3, + entity_clients: 3, + label: 'root', + mounts: [ + { + acme_clients: 0, + clients: 2, + entity_clients: 2, + label: 'no mount accessor (pre-1.10 upgrade?)', + non_entity_clients: 0, + secret_syncs: 0, + }, + { + acme_clients: 0, + clients: 1, + entity_clients: 1, + label: 'auth/u/', + non_entity_clients: 0, + secret_syncs: 0, + }, + ], + non_entity_clients: 0, + secret_syncs: 0, + }, + ], + 'it formats combined data for monthly namespaces spanning upgrade to 1.10' ); - assert.propEqual( - byNamespaceKeyObject, - SERIALIZED_ACTIVITY_RESPONSE.by_month[1].namespaces_by_key, - 'it returns object with namespaces by key and includes mounts_by_key' + formattedWithActivity.new_clients, + { + acme_clients: 0, + clients: 3, + entity_clients: 3, + month: '4/24', + namespaces: [ + { + acme_clients: 0, + clients: 3, + entity_clients: 3, + label: 'root', + mounts: [ + { + acme_clients: 0, + clients: 2, + entity_clients: 2, + label: 'no mount accessor (pre-1.10 upgrade?)', + non_entity_clients: 0, + secret_syncs: 0, + }, + { + acme_clients: 0, + clients: 1, + entity_clients: 1, + label: 'auth/u/', + non_entity_clients: 0, + secret_syncs: 0, + }, + ], + non_entity_clients: 0, + secret_syncs: 0, + }, + ], + non_entity_clients: 0, + secret_syncs: 0, + timestamp: '2024-04-01T00:00:00Z', + }, + 'it formats combined data for monthly new_clients spanning upgrade to 1.10' ); assert.propEqual( - namespaceArrayToObject(null, null, '10/21', 'timestamp-here'), - {}, - 'returns an empty object when monthByNamespace = null' + formattedWithActivity.namespaces_by_key, + { + root: { + acme_clients: 0, + clients: 3, + entity_clients: 3, + month: '4/24', + mounts_by_key: { + 'auth/u/': { + acme_clients: 0, + clients: 1, + entity_clients: 1, + label: 'auth/u/', + month: '4/24', + new_clients: { + acme_clients: 0, + clients: 1, + entity_clients: 1, + label: 'auth/u/', + month: '4/24', + non_entity_clients: 0, + secret_syncs: 0, + }, + non_entity_clients: 0, + secret_syncs: 0, + timestamp: '2024-04-01T00:00:00Z', + }, + 'no mount accessor (pre-1.10 upgrade?)': { + acme_clients: 0, + clients: 2, + entity_clients: 2, + label: 'no mount accessor (pre-1.10 upgrade?)', + month: '4/24', + new_clients: { + acme_clients: 0, + clients: 2, + entity_clients: 2, + label: 'no mount accessor (pre-1.10 upgrade?)', + month: '4/24', + non_entity_clients: 0, + secret_syncs: 0, + }, + non_entity_clients: 0, + secret_syncs: 0, + timestamp: '2024-04-01T00:00:00Z', + }, + }, + new_clients: { + acme_clients: 0, + clients: 3, + entity_clients: 3, + label: 'root', + month: '4/24', + mounts: [ + { + acme_clients: 0, + clients: 2, + entity_clients: 2, + label: 'no mount accessor (pre-1.10 upgrade?)', + non_entity_clients: 0, + secret_syncs: 0, + }, + { + acme_clients: 0, + clients: 1, + entity_clients: 1, + label: 'auth/u/', + non_entity_clients: 0, + secret_syncs: 0, + }, + ], + non_entity_clients: 0, + secret_syncs: 0, + }, + non_entity_clients: 
0, + secret_syncs: 0, + timestamp: '2024-04-01T00:00:00Z', + }, + }, + 'it formats combined data for monthly namespaces_by_key spanning upgrade to 1.10' ); - assert.propEqual(RESPONSE.months[1], original, 'it does not modify original month data'); }); }); diff --git a/ui/types/vault/charts/client-counts.d.ts b/ui/types/vault/charts/client-counts.d.ts index 7edb8b1b6d3e..c3ca2a922c7f 100644 --- a/ui/types/vault/charts/client-counts.d.ts +++ b/ui/types/vault/charts/client-counts.d.ts @@ -3,15 +3,11 @@ * SPDX-License-Identifier: BUSL-1.1 */ -// Count and EmptyCount are mutually exclusive +import type { TotalClients } from 'core/utils/client-count-utils'; + +// TotalClients and EmptyCount are mutually exclusive // but that's hard to represent in an interface // so for now we just have both -interface Count { - clients?: number; - entity_clients?: number; - non_entity_clients?: number; - secret_syncs?: number; -} interface EmptyCount { count?: null; } @@ -20,6 +16,6 @@ interface Timestamp { timestamp: string; // ISO 8601 } -export interface MonthlyChartData extends Count, EmptyCount, Timestamp { - new_clients?: Count; +export interface MonthlyChartData extends TotalClients, EmptyCount, Timestamp { + new_clients?: TotalClients; } diff --git a/ui/types/vault/models/clients/activity.d.ts b/ui/types/vault/models/clients/activity.d.ts index 4c1f43862a33..58e7123777db 100644 --- a/ui/types/vault/models/clients/activity.d.ts +++ b/ui/types/vault/models/clients/activity.d.ts @@ -5,45 +5,12 @@ import type { Model } from 'vault/app-types'; -interface ClientActivityTotals { - clients: number; - entity_clients: number; - non_entity_clients: number; - secret_syncs: number; -} - -interface ClientActivityNestedCount extends ClientActivityTotals { - label: string; -} - -interface ClientActivityNewClients extends ClientActivityTotals { - month: string; - mounts?: ClientActivityNestedCount[]; - namespaces?: ClientActivityNestedCount[]; -} - -interface ClientActivityNamespace extends ClientActivityNestedCount { - mounts: ClientActivityNestedCount[]; -} - -interface ClientActivityResourceByKey extends ClientActivityTotals { - month: 'string'; - mounts_by_key: { [key: string]: ClientActivityResourceByKey }; - new_clients: ClientActivityNewClients; -} - -interface ClientActivityMonthly extends ClientActivityTotals { - month: string; - timestamp: string; - namespaces: ClientActivityNamespace[]; - namespaces_by_key: { [key: string]: ClientActivityResourceByKey }; - new_clients: ClientActivityNewClients; -} +import type { ByMonthClients, ByNamespaceClients, TotalClients } from 'core/utils/client-count-utils'; export default interface ClientsActivityModel extends Model { - byMonth: ClientActivityMonthly[]; - byNamespace: ClientActivityNamespace[]; - total: ClientActivityTotals; + byMonth: ByMonthClients[]; + byNamespace: ByNamespaceClients[]; + total: TotalClients; startTime: string; endTime: string; responseTimestamp: string;