diff --git a/CODEOWNERS b/CODEOWNERS index e6910c3f4..7128b86a7 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,3 +1,3 @@ # These owners will be the default owners for everything in # the repo. Unless a later match takes precedence. -* @katrogan @wild-endeavor @anandswaminathan @EngHabu @kumare3 @pmahindrakar-oss +* @katrogan @wild-endeavor @EngHabu @kumare3 @pmahindrakar-oss @hamersaw @eapolinario diff --git a/Dockerfile b/Dockerfile index fca064ff1..fca317cfd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,12 @@ # # TO OPT OUT OF UPDATES, SEE https://github.com/lyft/boilerplate/blob/master/Readme.rst -FROM golang:1.18-alpine3.15 as builder +FROM --platform=${BUILDPLATFORM} golang:1.18-alpine3.15 as builder + +ARG TARGETARCH +ENV GOARCH "${TARGETARCH}" +ENV GOOS linux + RUN apk add git openssh-client make curl # Create the artifacts directory diff --git a/auth/auth_context.go b/auth/auth_context.go index 0e3ec07b2..cde008d10 100644 --- a/auth/auth_context.go +++ b/auth/auth_context.go @@ -133,6 +133,11 @@ func NewAuthenticationContext(ctx context.Context, sm core.SecretManager, oauth2 Timeout: IdpConnectionTimeout, } + if len(options.UserAuth.HTTPProxyURL.String()) > 0 { + logger.Infof(ctx, "HTTPProxy URL for OAuth2 is: %s", options.UserAuth.HTTPProxyURL.String()) + httpClient.Transport = &http.Transport{Proxy: http.ProxyURL(&options.UserAuth.HTTPProxyURL.URL)} + } + // Construct an oidc Provider, which needs its own http Client. oidcCtx := oidc.ClientContext(ctx, httpClient) baseURL := options.UserAuth.OpenID.BaseURL.String() diff --git a/auth/authzserver/claims_verifier.go b/auth/authzserver/claims_verifier.go index 7887ee084..353c889d6 100644 --- a/auth/authzserver/claims_verifier.go +++ b/auth/authzserver/claims_verifier.go @@ -64,5 +64,5 @@ func verifyClaims(expectedAudience sets.String, claimsRaw map[string]interface{} scopes.Insert(auth.ScopeAll) } - return auth.NewIdentityContext(claims.Audience[foundAudIndex], claims.Subject, clientID, claims.IssuedAt, scopes, userInfo, claimsRaw), nil + return auth.NewIdentityContext(claims.Audience[foundAudIndex], claims.Subject, clientID, claims.IssuedAt, scopes, userInfo, claimsRaw) } diff --git a/auth/authzserver/metadata_provider.go b/auth/authzserver/metadata_provider.go index 6e6ed79a7..bba6f47c4 100644 --- a/auth/authzserver/metadata_provider.go +++ b/auth/authzserver/metadata_provider.go @@ -90,6 +90,7 @@ func (s OAuth2MetadataProvider) GetPublicClientConfig(context.Context, *service. 
RedirectUri: s.cfg.AppAuth.ThirdParty.FlyteClientConfig.RedirectURI, Scopes: s.cfg.AppAuth.ThirdParty.FlyteClientConfig.Scopes, AuthorizationMetadataKey: s.cfg.GrpcAuthorizationHeader, + Audience: s.cfg.AppAuth.ThirdParty.FlyteClientConfig.Audience, }, nil } diff --git a/auth/authzserver/metadata_provider_test.go b/auth/authzserver/metadata_provider_test.go index 527091c71..b3606ac4a 100644 --- a/auth/authzserver/metadata_provider_test.go +++ b/auth/authzserver/metadata_provider_test.go @@ -24,6 +24,7 @@ func TestOAuth2MetadataProvider_FlyteClient(t *testing.T) { ClientID: "my-client", RedirectURI: "client/", Scopes: []string{"all"}, + Audience: "http://dummyServer", }, }, }, @@ -35,6 +36,7 @@ func TestOAuth2MetadataProvider_FlyteClient(t *testing.T) { assert.Equal(t, "my-client", resp.ClientId) assert.Equal(t, "client/", resp.RedirectUri) assert.Equal(t, []string{"all"}, resp.Scopes) + assert.Equal(t, "http://dummyServer", resp.Audience) } func TestOAuth2MetadataProvider_OAuth2Metadata(t *testing.T) { diff --git a/auth/config/authorizationservertype_enumer.go b/auth/config/authorizationservertype_enumer.go index a5c7dc276..f6e89a6fd 100644 --- a/auth/config/authorizationservertype_enumer.go +++ b/auth/config/authorizationservertype_enumer.go @@ -1,6 +1,5 @@ // Code generated by "enumer --type=AuthorizationServerType --trimprefix=AuthorizationServerType -json"; DO NOT EDIT. -// package config import ( diff --git a/auth/config/config.go b/auth/config/config.go index eca659fe1..a42365205 100644 --- a/auth/config/config.go +++ b/auth/config/config.go @@ -215,6 +215,9 @@ type UserAuthConfig struct { OpenID OpenIDOptions `json:"openId" pflag:",OpenID Configuration for User Auth"` // Possibly add basicAuth & SAML/p support. + // HTTPProxyURL allows operators to access external OAuth2 servers using an external HTTP Proxy + HTTPProxyURL config.URL `json:"httpProxyURL" pflag:",OPTIONAL: HTTP Proxy to be used for OAuth requests."` + // Secret names, defaults are set in DefaultConfig variable above but are possible to override through configs. 
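
For reference, the new userAuth.httpProxyURL option added to UserAuthConfig above is consumed in auth/auth_context.go (earlier in this diff) by installing a proxying transport on the HTTP client used for OAuth2/OIDC calls. Below is a minimal, standard-library-only sketch of that wiring; the helper name and proxy address are illustrative and not part of this change.

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"time"
)

// newOAuth2HTTPClient mirrors the wiring added in auth/auth_context.go: when a
// proxy URL is configured, token and userinfo requests from the OAuth2 client
// are routed through it. The proxy address used below is a placeholder.
func newOAuth2HTTPClient(proxyURL string, timeout time.Duration) (*http.Client, error) {
	client := &http.Client{Timeout: timeout}
	if proxyURL == "" {
		return client, nil
	}
	parsed, err := url.Parse(proxyURL)
	if err != nil {
		return nil, fmt.Errorf("invalid proxy URL %q: %w", proxyURL, err)
	}
	client.Transport = &http.Transport{Proxy: http.ProxyURL(parsed)}
	return client, nil
}

func main() {
	client, err := newOAuth2HTTPClient("http://proxy.internal:3128", 10*time.Second)
	if err != nil {
		panic(err)
	}
	fmt.Printf("proxied transport configured: %v\n", client.Transport != nil)
}
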
CookieHashKeySecretName string `json:"cookieHashKeySecretName" pflag:",OPTIONAL: Secret name to use for cookie hash key."` CookieBlockKeySecretName string `json:"cookieBlockKeySecretName" pflag:",OPTIONAL: Secret name to use for cookie block key."` diff --git a/auth/config/config_flags.go b/auth/config/config_flags.go index a13193164..b84be106f 100755 --- a/auth/config/config_flags.go +++ b/auth/config/config_flags.go @@ -60,6 +60,7 @@ func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.String(fmt.Sprintf("%v%v", prefix, "userAuth.openId.clientSecretFile"), DefaultConfig.UserAuth.OpenID.DeprecatedClientSecretFile, "") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "userAuth.openId.baseUrl"), DefaultConfig.UserAuth.OpenID.BaseURL.String(), "") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "userAuth.openId.scopes"), DefaultConfig.UserAuth.OpenID.Scopes, "") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "userAuth.httpProxyURL"), DefaultConfig.UserAuth.HTTPProxyURL.String(), "OPTIONAL: HTTP Proxy to be used for OAuth requests.") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "userAuth.cookieHashKeySecretName"), DefaultConfig.UserAuth.CookieHashKeySecretName, "OPTIONAL: Secret name to use for cookie hash key.") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "userAuth.cookieBlockKeySecretName"), DefaultConfig.UserAuth.CookieBlockKeySecretName, "OPTIONAL: Secret name to use for cookie block key.") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "userAuth.cookieSetting.sameSitePolicy"), DefaultConfig.UserAuth.CookieSetting.SameSitePolicy.String(), "OPTIONAL: Allows you to declare if your cookie should be restricted to a first-party or same-site context.Wrapper around http.SameSite.") @@ -77,5 +78,6 @@ func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.String(fmt.Sprintf("%v%v", prefix, "appAuth.thirdPartyConfig.flyteClient.clientId"), DefaultConfig.AppAuth.ThirdParty.FlyteClientConfig.ClientID, "public identifier for the app which handles authorization for a Flyte deployment") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "appAuth.thirdPartyConfig.flyteClient.redirectUri"), DefaultConfig.AppAuth.ThirdParty.FlyteClientConfig.RedirectURI, "This is the callback uri registered with the app which handles authorization for a Flyte deployment") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "appAuth.thirdPartyConfig.flyteClient.scopes"), DefaultConfig.AppAuth.ThirdParty.FlyteClientConfig.Scopes, "Recommended scopes for the client to request.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "appAuth.thirdPartyConfig.flyteClient.audience"), DefaultConfig.AppAuth.ThirdParty.FlyteClientConfig.Audience, "Audience to use when initiating OAuth2 authorization requests.") return cmdFlags } diff --git a/auth/config/config_flags_test.go b/auth/config/config_flags_test.go index 12a18a1f9..ffcb653d8 100755 --- a/auth/config/config_flags_test.go +++ b/auth/config/config_flags_test.go @@ -239,6 +239,20 @@ func TestConfig_SetFlags(t *testing.T) { } }) }) + t.Run("Test_userAuth.httpProxyURL", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := DefaultConfig.UserAuth.HTTPProxyURL.String() + + cmdFlags.Set("userAuth.httpProxyURL", testValue) + if vString, err := cmdFlags.GetString("userAuth.httpProxyURL"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.UserAuth.HTTPProxyURL) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) t.Run("Test_userAuth.cookieHashKeySecretName", func(t *testing.T) { t.Run("Override", func(t *testing.T) { 
@@ -477,4 +491,18 @@ func TestConfig_SetFlags(t *testing.T) { } }) }) + t.Run("Test_appAuth.thirdPartyConfig.flyteClient.audience", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("appAuth.thirdPartyConfig.flyteClient.audience", testValue) + if vString, err := cmdFlags.GetString("appAuth.thirdPartyConfig.flyteClient.audience"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.AppAuth.ThirdParty.FlyteClientConfig.Audience) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) } diff --git a/auth/config/samesite_enumer.go b/auth/config/samesite_enumer.go index af9bfdf6c..e42e58fbb 100644 --- a/auth/config/samesite_enumer.go +++ b/auth/config/samesite_enumer.go @@ -1,6 +1,5 @@ // Code generated by "enumer --type=SameSite --trimprefix=SameSite -json"; DO NOT EDIT. -// package config import ( diff --git a/auth/config/third_party_config.go b/auth/config/third_party_config.go index b7474d610..3b9b97896 100644 --- a/auth/config/third_party_config.go +++ b/auth/config/third_party_config.go @@ -10,6 +10,7 @@ type FlyteClientConfig struct { ClientID string `json:"clientId" pflag:",public identifier for the app which handles authorization for a Flyte deployment"` RedirectURI string `json:"redirectUri" pflag:",This is the callback uri registered with the app which handles authorization for a Flyte deployment"` Scopes []string `json:"scopes" pflag:",Recommended scopes for the client to request."` + Audience string `json:"audience" pflag:",Audience to use when initiating OAuth2 authorization requests."` } func (o ThirdPartyConfigOptions) IsEmpty() bool { diff --git a/auth/create_secrets.go b/auth/create_secrets.go new file mode 100644 index 000000000..a63dd6122 --- /dev/null +++ b/auth/create_secrets.go @@ -0,0 +1,185 @@ +package auth + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "k8s.io/client-go/rest" + + "github.com/flyteorg/flytestdlib/logger" + kubeErrors "k8s.io/apimachinery/pkg/api/errors" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/flyteorg/flyteadmin/pkg/config" + executioncluster "github.com/flyteorg/flyteadmin/pkg/executioncluster/impl" + "github.com/flyteorg/flyteadmin/pkg/executioncluster/interfaces" + "github.com/flyteorg/flyteadmin/pkg/runtime" + "github.com/flyteorg/flytestdlib/errors" + "github.com/flyteorg/flytestdlib/promutils" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "k8s.io/client-go/kubernetes" +) + +const ( + PodNamespaceEnvVar = "POD_NAMESPACE" + podDefaultNamespace = "default" +) + +var ( + secretName string + secretsLocalPath string + forceUpdate bool +) + +func GetCreateSecretsCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "create", + Long: `Creates a new secret (or noop if one exists unless --force is provided) using keys found in the provided path. +If POD_NAMESPACE env var is set, the secret will be created in that namespace. 
+`, + Example: ` +Create a secret using default name (flyte-admin-auth) in default namespace +flyteadmin secret create --fromPath=/path/in/container + +Override an existing secret if one exists (reads secrets from default path /etc/secrets/): +flyteadmin secret create --name "my-auth-secrets" --force +`, + RunE: func(cmd *cobra.Command, args []string) error { + return persistSecrets(context.Background(), cmd.Flags()) + }, + } + cmd.Flags().StringVar(&secretName, "name", "flyte-admin-auth", "Chooses secret name to create/update") + cmd.Flags().StringVar(&secretsLocalPath, "fromPath", filepath.Join(string(os.PathSeparator), "etc", "secrets"), "Chooses secret name to create/update") + cmd.Flags().BoolVarP(&forceUpdate, "force", "f", false, "Whether to update the secret if one exists") + + return cmd +} + +func persistSecrets(ctx context.Context, _ *pflag.FlagSet) error { + configuration := runtime.NewConfigurationProvider() + scope := promutils.NewScope(configuration.ApplicationConfiguration().GetTopLevelConfig().MetricsScope) + initializationErrorCounter := scope.NewSubScope("secrets").MustNewCounter( + "flyteclient_initialization_error", + "count of errors encountered initializing a flyte client from kube config") + + var listTargetsProvider interfaces.ListTargetsInterface + var err error + if len(configuration.ClusterConfiguration().GetClusterConfigs()) == 0 { + serverConfig := config.GetConfig() + listTargetsProvider, err = executioncluster.NewInCluster(initializationErrorCounter, serverConfig.KubeConfig, serverConfig.Master) + } else { + listTargetsProvider, err = executioncluster.NewListTargets(initializationErrorCounter, executioncluster.NewExecutionTargetProvider(), configuration.ClusterConfiguration()) + } + if err != nil { + return err + } + + targets := listTargetsProvider.GetValidTargets() + // Since we are targeting the cluster Admin is running in, this list should contain exactly one item + if len(targets) != 1 { + return fmt.Errorf("expected exactly 1 valid target cluster. Found [%v]", len(targets)) + } + var clusterCfg rest.Config + for _, target := range targets { + // We've just ascertained targets contains exactly 1 item, so we can safely assume we'll assign the clusterCfg + // from that one item now. + clusterCfg = target.Config + } + + kubeClient, err := kubernetes.NewForConfig(&clusterCfg) + if err != nil { + return errors.Wrapf("INIT", err, "Error building kubernetes clientset") + } + + podNamespace, found := os.LookupEnv(PodNamespaceEnvVar) + if !found { + podNamespace = podDefaultNamespace + } + + secretsData, err := buildK8sSecretData(ctx, secretsLocalPath) + if err != nil { + return errors.Wrapf("INIT", err, "Error building k8s secret's data field.") + } + + secretsClient := kubeClient.CoreV1().Secrets(podNamespace) + newSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: podNamespace, + }, + Type: corev1.SecretTypeOpaque, + Data: secretsData, + } + + _, err = secretsClient.Create(ctx, newSecret, metav1.CreateOptions{}) + + if err != nil && kubeErrors.IsAlreadyExists(err) { + if forceUpdate { + logger.Infof(ctx, "A secret already exists with the same name. Attempting to update it.") + _, err = secretsClient.Update(ctx, newSecret, metav1.UpdateOptions{}) + } else { + var existingSecret *corev1.Secret + existingSecret, err = secretsClient.Get(ctx, newSecret.Name, metav1.GetOptions{}) + if err != nil { + logger.Infof(ctx, "Failed to retrieve existing secret. 
Error: %v", err) + return err + } + + if existingSecret.Data == nil { + existingSecret.Data = map[string][]byte{} + } + + needsUpdate := false + for key, val := range secretsData { + if _, found := existingSecret.Data[key]; !found { + existingSecret.Data[key] = val + needsUpdate = true + } + } + + if needsUpdate { + _, err = secretsClient.Update(ctx, existingSecret, metav1.UpdateOptions{}) + if err != nil && kubeErrors.IsConflict(err) { + logger.Infof(ctx, "Another instance of flyteadmin has updated the same secret. Ignoring this update") + err = nil + } + } + } + + return err + } + + return err +} + +func buildK8sSecretData(_ context.Context, localPath string) (map[string][]byte, error) { + secretsData := make(map[string][]byte, 4) + + err := filepath.Walk(localPath, func(path string, info os.FileInfo, err error) error { + if err != nil || info.IsDir() { + return nil + } + + data, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + secretsData[strings.TrimPrefix(path, filepath.Dir(path)+string(filepath.Separator))] = data + return nil + }) + + if err != nil { + return nil, err + } + + return secretsData, nil +} diff --git a/auth/handlers.go b/auth/handlers.go index 26c9469c0..d3e451295 100644 --- a/auth/handlers.go +++ b/auth/handlers.go @@ -8,22 +8,19 @@ import ( "strings" "time" - "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/service" - - "golang.org/x/oauth2" - - "github.com/flyteorg/flyteadmin/pkg/common" - "google.golang.org/grpc/peer" - - "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" - "github.com/flyteorg/flyteadmin/auth/interfaces" + "github.com/flyteorg/flyteadmin/pkg/common" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/service" "github.com/flyteorg/flytestdlib/errors" "github.com/flyteorg/flytestdlib/logger" + "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" + "golang.org/x/oauth2" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/status" + "google.golang.org/protobuf/runtime/protoiface" ) const ( @@ -33,6 +30,7 @@ const ( ) type HTTPRequestToMetadataAnnotator func(ctx context.Context, request *http.Request) metadata.MD +type UserInfoForwardResponseHandler func(ctx context.Context, w http.ResponseWriter, m protoiface.MessageV1) error type AuthenticatedClientMeta struct { ClientIds []string @@ -59,6 +57,7 @@ func RegisterHandlers(ctx context.Context, handler interfaces.HandlerRegisterer, func RefreshTokensIfExists(ctx context.Context, authCtx interfaces.AuthenticationContext, authHandler http.HandlerFunc) http.HandlerFunc { return func(writer http.ResponseWriter, request *http.Request) { + ctx = context.WithValue(ctx, oauth2.HTTPClient, authCtx.GetHTTPClient()) // Since we only do one thing if there are no errors anywhere along the chain, we can save code by just // using one variable and checking for errors at the end. idToken, accessToken, refreshToken, err := authCtx.CookieManager().RetrieveTokenValues(ctx, request) @@ -142,6 +141,8 @@ func GetCallbackHandler(ctx context.Context, authCtx interfaces.AuthenticationCo logger.Debugf(ctx, "Running callback handler... 
for RequestURI %v", request.RequestURI) authorizationCode := request.FormValue(AuthorizationResponseCodeType) + ctx = context.WithValue(ctx, oauth2.HTTPClient, authCtx.GetHTTPClient()) + err := VerifyCsrfCookie(ctx, request) if err != nil { logger.Errorf(ctx, "Invalid CSRF token cookie %s", err) @@ -443,3 +444,24 @@ func GetLogoutEndpointHandler(ctx context.Context, authCtx interfaces.Authentica } } } + +func GetUserInfoForwardResponseHandler() UserInfoForwardResponseHandler { + return func(ctx context.Context, w http.ResponseWriter, m protoiface.MessageV1) error { + info, ok := m.(*service.UserInfoResponse) + if ok { + if info.AdditionalClaims != nil { + for k, v := range info.AdditionalClaims.GetFields() { + jsonBytes, err := v.MarshalJSON() + if err != nil { + logger.Warningf(ctx, "failed to marshal claim [%s] to json: %v", k, err) + continue + } + header := fmt.Sprintf("X-User-Claim-%s", strings.ReplaceAll(k, "_", "-")) + w.Header().Set(header, string(jsonBytes)) + } + } + w.Header().Set("X-User-Subject", info.Subject) + } + return nil + } +} diff --git a/auth/handlers_test.go b/auth/handlers_test.go index 2f5917db9..88232de1c 100644 --- a/auth/handlers_test.go +++ b/auth/handlers_test.go @@ -10,9 +10,12 @@ import ( "strings" "testing" + "google.golang.org/protobuf/types/known/structpb" + "github.com/flyteorg/flyteadmin/auth/config" "github.com/flyteorg/flyteadmin/auth/interfaces/mocks" "github.com/flyteorg/flyteadmin/pkg/common" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/service" stdConfig "github.com/flyteorg/flytestdlib/config" "github.com/coreos/go-oidc" @@ -42,10 +45,14 @@ func setupMockedAuthContextAtEndpoint(endpoint string) *mocks.AuthenticationCont }, Scopes: []string{"openid", "other"}, } + dummyHTTPClient := &http.Client{ + Timeout: IdpConnectionTimeout, + } mockAuthCtx.OnCookieManagerMatch().Return(mockCookieHandler) mockCookieHandler.OnSetTokenCookiesMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) mockCookieHandler.OnSetUserInfoCookieMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) mockAuthCtx.OnOAuth2ClientConfigMatch(mock.Anything).Return(&dummyOAuth2Config) + mockAuthCtx.OnGetHTTPClient().Return(dummyHTTPClient) return mockAuthCtx } @@ -290,3 +297,31 @@ func TestGetOIdCMetadataEndpointRedirectHandler(t *testing.T) { assert.Equal(t, http.StatusSeeOther, w.Code) assert.Equal(t, "http://www.google.com/.well-known/openid-configuration", w.Header()["Location"][0]) } + +func TestUserInfoForwardResponseHander(t *testing.T) { + ctx := context.Background() + handler := GetUserInfoForwardResponseHandler() + w := httptest.NewRecorder() + additionalClaims := map[string]interface{}{ + "cid": "cid-id", + "ver": 1, + } + additionalClaimsStruct, err := structpb.NewStruct(additionalClaims) + assert.NoError(t, err) + resp := service.UserInfoResponse{ + Subject: "user-id", + AdditionalClaims: additionalClaimsStruct, + } + assert.NoError(t, handler(ctx, w, &resp)) + assert.Contains(t, w.Result().Header, "X-User-Subject") + assert.Equal(t, w.Result().Header["X-User-Subject"], []string{"user-id"}) + assert.Contains(t, w.Result().Header, "X-User-Claim-Cid") + assert.Equal(t, w.Result().Header["X-User-Claim-Cid"], []string{"\"cid-id\""}) + assert.Contains(t, w.Result().Header, "X-User-Claim-Ver") + assert.Equal(t, w.Result().Header["X-User-Claim-Ver"], []string{"1"}) + + w = httptest.NewRecorder() + unrelatedResp := service.OAuth2MetadataResponse{} + assert.NoError(t, handler(ctx, w, &unrelatedResp)) + assert.NotContains(t, 
w.Result().Header, "X-User-Subject") +} diff --git a/auth/identity_context.go b/auth/identity_context.go index eafa0dcf0..4f36bb83e 100644 --- a/auth/identity_context.go +++ b/auth/identity_context.go @@ -2,8 +2,11 @@ package auth import ( "context" + "fmt" "time" + "github.com/flyteorg/flyteplugins/go/tasks/pluginmachinery/utils" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/service" "k8s.io/apimachinery/pkg/util/sets" @@ -79,7 +82,8 @@ func (c IdentityContext) AuthenticatedAt() time.Time { } // NewIdentityContext creates a new IdentityContext. -func NewIdentityContext(audience, userID, appID string, authenticatedAt time.Time, scopes sets.String, userInfo *service.UserInfoResponse, claims map[string]interface{}) IdentityContext { +func NewIdentityContext(audience, userID, appID string, authenticatedAt time.Time, scopes sets.String, userInfo *service.UserInfoResponse, claims map[string]interface{}) ( + IdentityContext, error) { // For some reason, google IdP returns a subject in the ID Token but an empty subject in the /user_info endpoint if userInfo == nil { userInfo = &service.UserInfoResponse{} @@ -89,6 +93,14 @@ func NewIdentityContext(audience, userID, appID string, authenticatedAt time.Tim userInfo.Subject = userID } + if len(claims) > 0 { + claimsStruct, err := utils.MarshalObjToStruct(claims) + if err != nil { + return IdentityContext{}, fmt.Errorf("failed to marshal claims [%+v] to struct: %w", claims, err) + } + userInfo.AdditionalClaims = claimsStruct + } + return IdentityContext{ audience: audience, userID: userID, @@ -97,7 +109,7 @@ func NewIdentityContext(audience, userID, appID string, authenticatedAt time.Tim authenticatedAt: authenticatedAt, scopes: &scopes, claims: &claims, - } + }, nil } // IdentityContextFromContext retrieves the authenticated identity from context.Context. diff --git a/auth/identity_context_test.go b/auth/identity_context_test.go index 0cda160f3..5bee6347f 100644 --- a/auth/identity_context_test.go +++ b/auth/identity_context_test.go @@ -9,13 +9,17 @@ import ( func TestGetClaims(t *testing.T) { noClaims := map[string]interface{}(nil) - noClaimsCtx := NewIdentityContext("", "", "", time.Now(), nil, nil, nil) + noClaimsCtx, err := NewIdentityContext("", "", "", time.Now(), nil, nil, nil) + assert.NoError(t, err) assert.EqualValues(t, noClaims, noClaimsCtx.Claims()) claims := map[string]interface{}{ "groups": []string{"g1", "g2"}, "something": "else", } - withClaimsCtx := NewIdentityContext("", "", "", time.Now(), nil, nil, claims) + withClaimsCtx, err := NewIdentityContext("", "", "", time.Now(), nil, nil, claims) + assert.NoError(t, err) assert.EqualValues(t, claims, withClaimsCtx.Claims()) + + assert.NotEmpty(t, withClaimsCtx.UserInfo().AdditionalClaims) } diff --git a/auth/token.go b/auth/token.go index 5acdd3203..e358a230f 100644 --- a/auth/token.go +++ b/auth/token.go @@ -136,5 +136,5 @@ func IdentityContextFromIDTokenToken(ctx context.Context, tokenStr, clientID str // TODO: Document why automatically specify "all" scope return NewIdentityContext(idToken.Audience[0], idToken.Subject, "", idToken.IssuedAt, - sets.NewString(ScopeAll), userInfo, claims), nil + sets.NewString(ScopeAll), userInfo, claims) } diff --git a/boilerplate/flyte/end2end/run-tests.py b/boilerplate/flyte/end2end/run-tests.py old mode 100755 new mode 100644 index b868be57f..b48f2be9d --- a/boilerplate/flyte/end2end/run-tests.py +++ b/boilerplate/flyte/end2end/run-tests.py @@ -21,8 +21,14 @@ # inputs. 
This is so we can progressively cover all priorities in the original flytesnacks manifest, # starting with "core". FLYTESNACKS_WORKFLOW_GROUPS: Mapping[str, List[Tuple[str, dict]]] = { + "lite": [ + ("core.flyte_basics.hello_world.my_wf", {}), + ("core.flyte_basics.lp.go_greet", {"day_of_week": "5", "number": 3, "am": True}), + ], "core": [ - ("core.control_flow.chain_entities.chain_workflows_wf", {}), + ("core.flyte_basics.deck.wf", {}), + # The chain_workflows example in flytesnacks expects to be running in a sandbox. + # ("core.control_flow.chain_entities.chain_workflows_wf", {}), ("core.control_flow.dynamics.wf", {"s1": "Pear", "s2": "Earth"}), ("core.control_flow.map_task.my_map_workflow", {"a": [1, 2, 3, 4, 5]}), # Workflows that use nested executions cannot be launched via flyteremote. @@ -52,7 +58,7 @@ # ("core.type_system.enums.enum_wf", {"c": "red"}), ("core.type_system.schema.df_wf", {"a": 42}), ("core.type_system.typed_schema.wf", {}), - ("my.imperative.workflow.example", {"in1": "hello", "in2": "foo"}), + #("my.imperative.workflow.example", {"in1": "hello", "in2": "foo"}), ], "integrations-k8s-spark": [ ("k8s_spark.pyspark_pi.my_spark", {"triggered_date": datetime.datetime.now()}), @@ -63,9 +69,9 @@ "integrations-kftensorflow": [ ("kftensorflow.tf_mnist.mnist_tensorflow_workflow", {}), ], - "integrations-pod": [ - ("pod.pod.pod_workflow", {}), - ], + # "integrations-pod": [ + # ("pod.pod.pod_workflow", {}), + # ], "integrations-pandera_examples": [ ("pandera_examples.basic_schema_example.process_data", {}), # TODO: investigate type mismatch float -> numpy.float64 @@ -98,10 +104,15 @@ def executions_finished(executions_by_wfgroup: Dict[str, List[FlyteWorkflowExecu return True def sync_executions(remote: FlyteRemote, executions_by_wfgroup: Dict[str, List[FlyteWorkflowExecution]]): - for executions in executions_by_wfgroup.values(): - for execution in executions: - print(f"About to sync execution_id={execution.id.name}") - remote.sync(execution) + try: + for executions in executions_by_wfgroup.values(): + for execution in executions: + print(f"About to sync execution_id={execution.id.name}") + remote.sync(execution) + except: + print("GOT TO THE EXCEPT") + print("COUNT THIS!") + def report_executions(executions_by_wfgroup: Dict[str, List[FlyteWorkflowExecution]]): for executions in executions_by_wfgroup.values(): @@ -185,10 +196,11 @@ def run( f"{flytesnacks_release_tag}/cookbook/flyte_tests_manifest.json" r = requests.get(manifest_url) parsed_manifest = r.json() + workflow_groups = [] + workflow_groups = ["lite"] if "lite" in priorities else [ + group["name"] for group in parsed_manifest if group["priority"] in priorities + ] - workflow_groups = [ - group["name"] for group in parsed_manifest if group["priority"] in priorities - ] results = [] valid_workgroups = [] for workflow_group in workflow_groups: diff --git a/cmd/entrypoints/k8s_secret.go b/cmd/entrypoints/k8s_secret.go index 54dd3c36c..353f47f0d 100644 --- a/cmd/entrypoints/k8s_secret.go +++ b/cmd/entrypoints/k8s_secret.go @@ -1,43 +1,8 @@ package entrypoints import ( - "context" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "k8s.io/client-go/rest" - - "github.com/flyteorg/flytestdlib/logger" - kubeErrors "k8s.io/apimachinery/pkg/api/errors" - "github.com/flyteorg/flyteadmin/auth" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/flyteorg/flyteadmin/pkg/config" - executioncluster "github.com/flyteorg/flyteadmin/pkg/executioncluster/impl" - 
"github.com/flyteorg/flyteadmin/pkg/executioncluster/interfaces" - "github.com/flyteorg/flyteadmin/pkg/runtime" - "github.com/flyteorg/flytestdlib/errors" - "github.com/flyteorg/flytestdlib/promutils" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "k8s.io/client-go/kubernetes" -) - -const ( - PodNamespaceEnvVar = "POD_NAMESPACE" - podDefaultNamespace = "default" -) - -var ( - secretName string - secretsLocalPath string - forceUpdate bool ) var secretsCmd = &cobra.Command{ @@ -45,150 +10,8 @@ var secretsCmd = &cobra.Command{ Aliases: []string{"secrets"}, } -var secretsPersistCmd = &cobra.Command{ - Use: "create", - Long: `Creates a new secret (or noop if one exists unless --force is provided) using keys found in the provided path. -If POD_NAMESPACE env var is set, the secret will be created in that namespace. -`, - Example: ` -Create a secret using default name (flyte-admin-auth) in default namespace -flyteadmin secret create --fromPath=/path/in/container - -Override an existing secret if one exists (reads secrets from default path /etc/secrets/): -flyteadmin secret create --name "my-auth-secrets" --force -`, - RunE: func(cmd *cobra.Command, args []string) error { - return persistSecrets(context.Background(), cmd.Flags()) - }, -} - func init() { - secretsPersistCmd.Flags().StringVar(&secretName, "name", "flyte-admin-auth", "Chooses secret name to create/update") - secretsPersistCmd.Flags().StringVar(&secretsLocalPath, "fromPath", filepath.Join(string(os.PathSeparator), "etc", "secrets"), "Chooses secret name to create/update") - secretsPersistCmd.Flags().BoolVarP(&forceUpdate, "force", "f", false, "Whether to update the secret if one exists") - secretsCmd.AddCommand(secretsPersistCmd) + secretsCmd.AddCommand(auth.GetCreateSecretsCommand()) secretsCmd.AddCommand(auth.GetInitSecretsCommand()) - RootCmd.AddCommand(secretsCmd) } - -func buildK8sSecretData(_ context.Context, localPath string) (map[string][]byte, error) { - secretsData := make(map[string][]byte, 4) - - err := filepath.Walk(localPath, func(path string, info os.FileInfo, err error) error { - if err != nil || info.IsDir() { - return nil - } - - data, err := ioutil.ReadFile(path) - if err != nil { - return err - } - - secretsData[strings.TrimPrefix(path, filepath.Dir(path)+string(filepath.Separator))] = data - return nil - }) - - if err != nil { - return nil, err - } - - return secretsData, nil -} - -func persistSecrets(ctx context.Context, _ *pflag.FlagSet) error { - configuration := runtime.NewConfigurationProvider() - scope := promutils.NewScope(configuration.ApplicationConfiguration().GetTopLevelConfig().MetricsScope) - initializationErrorCounter := scope.NewSubScope("secrets").MustNewCounter( - "flyteclient_initialization_error", - "count of errors encountered initializing a flyte client from kube config") - - var listTargetsProvider interfaces.ListTargetsInterface - var err error - if len(configuration.ClusterConfiguration().GetClusterConfigs()) == 0 { - serverConfig := config.GetConfig() - listTargetsProvider, err = executioncluster.NewInCluster(initializationErrorCounter, serverConfig.KubeConfig, serverConfig.Master) - } else { - listTargetsProvider, err = executioncluster.NewListTargets(initializationErrorCounter, executioncluster.NewExecutionTargetProvider(), configuration.ClusterConfiguration()) - } - if err != nil { - return err - } - - targets := listTargetsProvider.GetValidTargets() - // Since we are targeting the cluster Admin is running in, this list should contain exactly one item - if len(targets) != 1 { - 
return fmt.Errorf("expected exactly 1 valid target cluster. Found [%v]", len(targets)) - } - var clusterCfg rest.Config - for _, target := range targets { - // We've just ascertained targets contains exactly 1 item, so we can safely assume we'll assign the clusterCfg - // from that one item now. - clusterCfg = target.Config - } - - kubeClient, err := kubernetes.NewForConfig(&clusterCfg) - if err != nil { - return errors.Wrapf("INIT", err, "Error building kubernetes clientset") - } - - podNamespace, found := os.LookupEnv(PodNamespaceEnvVar) - if !found { - podNamespace = podDefaultNamespace - } - - secretsData, err := buildK8sSecretData(ctx, secretsLocalPath) - if err != nil { - return errors.Wrapf("INIT", err, "Error building k8s secret's data field.") - } - - secretsClient := kubeClient.CoreV1().Secrets(podNamespace) - newSecret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: podNamespace, - }, - Type: corev1.SecretTypeOpaque, - Data: secretsData, - } - - _, err = secretsClient.Create(ctx, newSecret, metav1.CreateOptions{}) - - if err != nil && kubeErrors.IsAlreadyExists(err) { - if forceUpdate { - logger.Infof(ctx, "A secret already exists with the same name. Attempting to update it.") - _, err = secretsClient.Update(ctx, newSecret, metav1.UpdateOptions{}) - } else { - var existingSecret *corev1.Secret - existingSecret, err = secretsClient.Get(ctx, newSecret.Name, metav1.GetOptions{}) - if err != nil { - logger.Infof(ctx, "Failed to retrieve existing secret. Error: %v", err) - return err - } - - if existingSecret.Data == nil { - existingSecret.Data = map[string][]byte{} - } - - needsUpdate := false - for key, val := range secretsData { - if _, found := existingSecret.Data[key]; !found { - existingSecret.Data[key] = val - needsUpdate = true - } - } - - if needsUpdate { - _, err = secretsClient.Update(ctx, existingSecret, metav1.UpdateOptions{}) - if err != nil && kubeErrors.IsConflict(err) { - logger.Infof(ctx, "Another instance of flyteadmin has updated the same secret. Ignoring this update") - err = nil - } - } - } - - return err - } - - return err -} diff --git a/dataproxy/service.go b/dataproxy/service.go index 7a8d689ef..948c6a25c 100644 --- a/dataproxy/service.go +++ b/dataproxy/service.go @@ -6,9 +6,15 @@ import ( "encoding/base64" "fmt" "net/url" - "strings" + "reflect" "time" + "github.com/flyteorg/flyteadmin/pkg/errors" + "google.golang.org/grpc/codes" + + "github.com/flyteorg/flyteadmin/pkg/manager/interfaces" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteplugins/go/tasks/pluginmachinery/ioutils" "google.golang.org/protobuf/types/known/durationpb" @@ -27,9 +33,10 @@ import ( type Service struct { service.DataProxyServiceServer - cfg config.DataProxyConfig - dataStore *storage.DataStore - shardSelector ioutils.ShardSelector + cfg config.DataProxyConfig + dataStore *storage.DataStore + shardSelector ioutils.ShardSelector + nodeExecutionManager interfaces.NodeExecutionInterface } // CreateUploadLocation creates a temporary signed url to allow callers to upload content. 
@@ -37,20 +44,20 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp *service.CreateUploadLocationResponse, error) { if len(req.Project) == 0 || len(req.Domain) == 0 { - return nil, fmt.Errorf("prjoect and domain are required parameters") + return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "project and domain are required parameters") } if len(req.ContentMd5) == 0 { - return nil, fmt.Errorf("content_md5 is a required parameter") + return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "content_md5 is a required parameter") } if expiresIn := req.ExpiresIn; expiresIn != nil { if !expiresIn.IsValid() { - return nil, fmt.Errorf("expiresIn [%v] is invalid", expiresIn) + return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "expiresIn [%v] is invalid", expiresIn) } if expiresIn.AsDuration() > s.cfg.Upload.MaxExpiresIn.Duration { - return nil, fmt.Errorf("expiresIn [%v] cannot exceed max allowed expiration [%v]", + return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "expiresIn [%v] cannot exceed max allowed expiration [%v]", expiresIn.AsDuration().String(), s.cfg.Upload.MaxExpiresIn.String()) } } else { @@ -64,10 +71,10 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp md5 := base64.StdEncoding.EncodeToString(req.ContentMd5) urlSafeMd5 := base32.StdEncoding.EncodeToString(req.ContentMd5) - storagePath, err := createShardedStorageLocation(ctx, s.shardSelector, s.dataStore, s.cfg.Upload, + storagePath, err := createStorageLocation(ctx, s.dataStore, s.cfg.Upload, req.Project, req.Domain, urlSafeMd5, req.Filename) if err != nil { - return nil, err + return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to create shardedStorageLocation, Error: %v", err) } resp, err := s.dataStore.CreateSignedURL(ctx, storagePath, storage.SignedURLProperties{ @@ -77,7 +84,7 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp }) if err != nil { - return nil, fmt.Errorf("failed to create a signed url. Error: %w", err) + return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to create a signed url. Error: %v", err) } return &service.CreateUploadLocationResponse{ @@ -87,12 +94,57 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp }, nil } +// CreateDownloadLink retrieves the requested artifact type for a given execution (wf, node, task) as a signed url(s). +func (s Service) CreateDownloadLink(ctx context.Context, req *service.CreateDownloadLinkRequest) ( + resp *service.CreateDownloadLinkResponse, err error) { + if req, err = s.validateCreateDownloadLinkRequest(req); err != nil { + return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "error while validating request. Error: %v", err) + } + + // Lookup task, node, workflow execution + var nativeURL string + if nodeExecutionIDEnvelope, casted := req.GetSource().(*service.CreateDownloadLinkRequest_NodeExecutionId); casted { + node, err := s.nodeExecutionManager.GetNodeExecution(ctx, admin.NodeExecutionGetRequest{ + Id: nodeExecutionIDEnvelope.NodeExecutionId, + }) + + if err != nil { + return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "failed to find node execution [%v]. 
Error: %v", nodeExecutionIDEnvelope.NodeExecutionId, err) + } + + switch req.GetArtifactType() { + case service.ArtifactType_ARTIFACT_TYPE_DECK: + nativeURL = node.Closure.DeckUri + } + } else { + return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "unsupported source [%v]", reflect.TypeOf(req.GetSource())) + } + + if len(nativeURL) == 0 { + return nil, errors.NewFlyteAdminErrorf(codes.Internal, "no deckUrl found for request [%+v]", req) + } + + signedURLResp, err := s.dataStore.CreateSignedURL(ctx, storage.DataReference(nativeURL), storage.SignedURLProperties{ + Scope: stow.ClientMethodGet, + ExpiresIn: req.ExpiresIn.AsDuration(), + }) + + if err != nil { + return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to create a signed url. Error: %v", err) + } + + return &service.CreateDownloadLinkResponse{ + SignedUrl: []string{signedURLResp.URL.String()}, + ExpiresAt: timestamppb.New(time.Now().Add(req.ExpiresIn.AsDuration())), + }, nil +} + // CreateDownloadLocation creates a temporary signed url to allow callers to download content. func (s Service) CreateDownloadLocation(ctx context.Context, req *service.CreateDownloadLocationRequest) ( *service.CreateDownloadLocationResponse, error) { if err := s.validateCreateDownloadLocationRequest(req); err != nil { - return nil, err + return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "error while validating request: %v", err) } resp, err := s.dataStore.CreateSignedURL(ctx, storage.DataReference(req.NativeUrl), storage.SignedURLProperties{ @@ -101,7 +153,7 @@ func (s Service) CreateDownloadLocation(ctx context.Context, req *service.Create }) if err != nil { - return nil, fmt.Errorf("failed to create a signed url. Error: %w", err) + return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to create a signed url. Error: %v", err) } return &service.CreateDownloadLocationResponse{ @@ -111,22 +163,13 @@ func (s Service) CreateDownloadLocation(ctx context.Context, req *service.Create } func (s Service) validateCreateDownloadLocationRequest(req *service.CreateDownloadLocationRequest) error { - if expiresIn := req.ExpiresIn; expiresIn != nil { - if !expiresIn.IsValid() { - return fmt.Errorf("expiresIn [%v] is invalid", expiresIn) - } - - if expiresIn.AsDuration() < 0 { - return fmt.Errorf("expiresIn [%v] should not less than 0", - expiresIn.AsDuration().String()) - } else if expiresIn.AsDuration() > s.cfg.Download.MaxExpiresIn.Duration { - return fmt.Errorf("expiresIn [%v] cannot exceed max allowed expiration [%v]", - expiresIn.AsDuration().String(), s.cfg.Download.MaxExpiresIn.String()) - } - } else { - req.ExpiresIn = durationpb.New(s.cfg.Download.MaxExpiresIn.Duration) + validatedExpiresIn, err := validateDuration(req.ExpiresIn, s.cfg.Download.MaxExpiresIn.Duration) + if err != nil { + return fmt.Errorf("expiresIn is invalid. Error: %w", err) } + req.ExpiresIn = validatedExpiresIn + if _, err := url.Parse(req.NativeUrl); err != nil { return fmt.Errorf("failed to parse native_url [%v]", req.NativeUrl) @@ -135,23 +178,52 @@ func (s Service) validateCreateDownloadLocationRequest(req *service.CreateDownlo return nil } -// createShardedStorageLocation creates a location in storage destination to maximize read/write performance in most -// block stores. 
The final location should look something like: s3://// -func createShardedStorageLocation(ctx context.Context, shardSelector ioutils.ShardSelector, store *storage.DataStore, - cfg config.DataProxyUploadConfig, keyParts ...string) (storage.DataReference, error) { - keySuffixArr := make([]string, 0, 4) - if len(cfg.StoragePrefix) > 0 { - keySuffixArr = append(keySuffixArr, cfg.StoragePrefix) +func validateDuration(input *durationpb.Duration, maxAllowed time.Duration) (*durationpb.Duration, error) { + if input == nil { + return durationpb.New(maxAllowed), nil + } + + if !input.IsValid() { + return nil, fmt.Errorf("input duration [%v] is invalid", input) + } + + if input.AsDuration() < 0 { + return nil, fmt.Errorf("input duration [%v] should not less than 0", + input.AsDuration().String()) + } else if input.AsDuration() > maxAllowed { + return nil, fmt.Errorf("input duration [%v] cannot exceed max allowed expiration [%v]", + input.AsDuration(), maxAllowed) } - keySuffixArr = append(keySuffixArr, keyParts...) - prefix, err := shardSelector.GetShardPrefix(ctx, []byte(strings.Join(keySuffixArr, "/"))) + return input, nil +} + +func (s Service) validateCreateDownloadLinkRequest(req *service.CreateDownloadLinkRequest) (*service.CreateDownloadLinkRequest, error) { + validatedExpiresIn, err := validateDuration(req.ExpiresIn, s.cfg.Download.MaxExpiresIn.Duration) if err != nil { - return "", err + return nil, fmt.Errorf("expiresIn is invalid. Error: %w", err) + } + + req.ExpiresIn = validatedExpiresIn + + if req.GetArtifactType() == service.ArtifactType_ARTIFACT_TYPE_UNDEFINED { + return nil, fmt.Errorf("invalid artifact type [%v]", req.GetArtifactType()) + } + + if req.GetSource() == nil { + return nil, fmt.Errorf("source is required. Provided nil") } + return req, nil +} + +// createStorageLocation creates a location in storage destination to maximize read/write performance in most +// block stores. The final location should look something like: s3:/// +func createStorageLocation(ctx context.Context, store *storage.DataStore, + cfg config.DataProxyUploadConfig, keyParts ...string) (storage.DataReference, error) { + storagePath, err := store.ConstructReference(ctx, store.GetBaseContainerFQN(ctx), - append([]string{prefix}, keySuffixArr...)...) + append([]string{cfg.StoragePrefix}, keyParts...)...) if err != nil { return "", fmt.Errorf("failed to construct datastore reference. Error: %w", err) } @@ -159,7 +231,10 @@ func createShardedStorageLocation(ctx context.Context, shardSelector ioutils.Sha return storagePath, nil } -func NewService(cfg config.DataProxyConfig, dataStore *storage.DataStore) (Service, error) { +func NewService(cfg config.DataProxyConfig, + nodeExec interfaces.NodeExecutionInterface, + dataStore *storage.DataStore) (Service, error) { + // Context is not used in the constructor. Should ideally be removed. 
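
For reference, the CreateDownloadLink endpoint introduced above looks up the node execution, reads its deck URI, and returns signed URLs. The sketch below shows what a client call could look like, mirroring the request shape used in dataproxy/service_test.go; the gRPC client constructor, dial address, and identifier values are assumed (standard flyteidl-generated names and placeholders), not taken from this diff.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core"
	"github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/service"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Dial address and identifier values are placeholders.
	conn, err := grpc.Dial("localhost:8089", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := service.NewDataProxyServiceClient(conn)

	// Request a signed link to the node execution's deck. Omitting ExpiresIn
	// falls back to the service's configured MaxExpiresIn.
	resp, err := client.CreateDownloadLink(context.Background(), &service.CreateDownloadLinkRequest{
		ArtifactType: service.ArtifactType_ARTIFACT_TYPE_DECK,
		Source: &service.CreateDownloadLinkRequest_NodeExecutionId{
			NodeExecutionId: &core.NodeExecutionIdentifier{
				NodeId: "n0",
				ExecutionId: &core.WorkflowExecutionIdentifier{
					Project: "flytesnacks",
					Domain:  "development",
					Name:    "abc123",
				},
			},
		},
		ExpiresIn: durationpb.New(time.Hour),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.SignedUrl)
}
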
selector, err := ioutils.NewBase36PrefixShardSelector(context.TODO()) if err != nil { @@ -167,8 +242,9 @@ func NewService(cfg config.DataProxyConfig, dataStore *storage.DataStore) (Servi } return Service{ - cfg: cfg, - dataStore: dataStore, - shardSelector: selector, + cfg: cfg, + dataStore: dataStore, + shardSelector: selector, + nodeExecutionManager: nodeExec, }, nil } diff --git a/dataproxy/service_test.go b/dataproxy/service_test.go index 074c052e9..261b0f086 100644 --- a/dataproxy/service_test.go +++ b/dataproxy/service_test.go @@ -5,6 +5,12 @@ import ( "testing" "time" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/flyteorg/flyteadmin/pkg/manager/mocks" + commonMocks "github.com/flyteorg/flyteadmin/pkg/common/mocks" stdlibConfig "github.com/flyteorg/flytestdlib/config" @@ -14,7 +20,6 @@ import ( "github.com/flyteorg/flytestdlib/promutils/labeled" "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/service" - "github.com/flyteorg/flyteplugins/go/tasks/pluginmachinery/ioutils" "github.com/flyteorg/flyteadmin/pkg/config" "github.com/flyteorg/flytestdlib/promutils" @@ -25,9 +30,11 @@ import ( func TestNewService(t *testing.T) { dataStore, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) assert.NoError(t, err) + + nodeExecutionManager := &mocks.MockNodeExecutionManager{} s, err := NewService(config.DataProxyConfig{ Upload: config.DataProxyUploadConfig{}, - }, dataStore) + }, nodeExecutionManager, dataStore) assert.NoError(t, err) assert.NotNil(t, s) } @@ -36,22 +43,21 @@ func init() { labeled.SetMetricKeys(contextutils.DomainKey) } -func Test_createShardedStorageLocation(t *testing.T) { - selector, err := ioutils.NewBase36PrefixShardSelector(context.TODO()) - assert.NoError(t, err) +func Test_createStorageLocation(t *testing.T) { dataStore, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) assert.NoError(t, err) - loc, err := createShardedStorageLocation(context.Background(), selector, dataStore, config.DataProxyUploadConfig{ + loc, err := createStorageLocation(context.Background(), dataStore, config.DataProxyUploadConfig{ StoragePrefix: "blah", }) assert.NoError(t, err) - assert.Equal(t, "/u8/blah", loc.String()) + assert.Equal(t, "/blah", loc.String()) } func TestCreateUploadLocation(t *testing.T) { dataStore, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) assert.NoError(t, err) - s, err := NewService(config.DataProxyConfig{}, dataStore) + nodeExecutionManager := &mocks.MockNodeExecutionManager{} + s, err := NewService(config.DataProxyConfig{}, nodeExecutionManager, dataStore) assert.NoError(t, err) t.Run("No project/domain", func(t *testing.T) { _, err = s.CreateUploadLocation(context.Background(), &service.CreateUploadLocationRequest{}) @@ -76,9 +82,53 @@ func TestCreateUploadLocation(t *testing.T) { }) } +func TestCreateDownloadLink(t *testing.T) { + dataStore := commonMocks.GetMockStorageClient() + nodeExecutionManager := &mocks.MockNodeExecutionManager{} + nodeExecutionManager.SetGetNodeExecutionFunc(func(ctx context.Context, request admin.NodeExecutionGetRequest) (*admin.NodeExecution, error) { + return &admin.NodeExecution{ + Closure: &admin.NodeExecutionClosure{ + DeckUri: "s3://something/something", + }, + }, nil + }) + + s, err := NewService(config.DataProxyConfig{Download: config.DataProxyDownloadConfig{MaxExpiresIn: stdlibConfig.Duration{Duration: 
time.Hour}}}, nodeExecutionManager, dataStore) + assert.NoError(t, err) + + t.Run("Invalid expiry", func(t *testing.T) { + _, err = s.CreateDownloadLink(context.Background(), &service.CreateDownloadLinkRequest{ + ExpiresIn: durationpb.New(-time.Hour), + }) + assert.Error(t, err) + }) + + t.Run("valid config", func(t *testing.T) { + _, err = s.CreateDownloadLink(context.Background(), &service.CreateDownloadLinkRequest{ + ArtifactType: service.ArtifactType_ARTIFACT_TYPE_DECK, + Source: &service.CreateDownloadLinkRequest_NodeExecutionId{ + NodeExecutionId: &core.NodeExecutionIdentifier{}, + }, + ExpiresIn: durationpb.New(time.Hour), + }) + assert.NoError(t, err) + }) + + t.Run("use default ExpiresIn", func(t *testing.T) { + _, err = s.CreateDownloadLink(context.Background(), &service.CreateDownloadLinkRequest{ + ArtifactType: service.ArtifactType_ARTIFACT_TYPE_DECK, + Source: &service.CreateDownloadLinkRequest_NodeExecutionId{ + NodeExecutionId: &core.NodeExecutionIdentifier{}, + }, + }) + assert.NoError(t, err) + }) +} + func TestCreateDownloadLocation(t *testing.T) { dataStore := commonMocks.GetMockStorageClient() - s, err := NewService(config.DataProxyConfig{Download: config.DataProxyDownloadConfig{MaxExpiresIn: stdlibConfig.Duration{Duration: time.Hour}}}, dataStore) + nodeExecutionManager := &mocks.MockNodeExecutionManager{} + s, err := NewService(config.DataProxyConfig{Download: config.DataProxyDownloadConfig{MaxExpiresIn: stdlibConfig.Duration{Duration: time.Hour}}}, nodeExecutionManager, dataStore) assert.NoError(t, err) t.Run("Invalid expiry", func(t *testing.T) { diff --git a/flyteadmin_config.yaml b/flyteadmin_config.yaml index 26897bd05..964f83a81 100644 --- a/flyteadmin_config.yaml +++ b/flyteadmin_config.yaml @@ -6,7 +6,7 @@ server: httpPort: 8088 grpcPort: 8089 grpcServerReflection: true - kube-config: /Users/ytong/.kube/config + kube-config: /Users/ytong/.flyte/sandbox/kubeconfig security: secure: false useAuth: false @@ -31,7 +31,6 @@ auth: - offline_access # Uncomment if OIdC supports issuing refresh tokens. # Replace with the client id created for Flyte. 
clientId: 0oakkheteNjCMERst5d6 - # Okta OIdC and OAuth2 #auth: # authorizedUris: @@ -63,10 +62,11 @@ flyteadmin: useOffloadedWorkflowClosure: false database: postgres: - port: 5432 + port: 30001 username: postgres - host: localhost - dbname: flyteadmin + password: postgres + host: 127.0.0.1 + dbname: flyte options: "sslmode=disable" scheduler: eventScheduler: @@ -116,14 +116,17 @@ Logger: show-source: true level: 6 storage: - type: minio - connection: - access-key: minio - auth-type: accesskey - secret-key: miniostorage - disable-ssl: true - endpoint: "http://localhost:30084" - region: my-region + type: stow + stow: + kind: s3 + config: + region: us-east-1 + disable_ssl: true + v2_signing: true + endpoint: http://localhost:30002 + auth_type: accesskey + access_key_id: minio + secret_key: miniostorage signedUrl: stowConfigOverride: endpoint: http://localhost:30084 diff --git a/go.mod b/go.mod index 841a0236b..6710d33bb 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/flyteorg/flyteadmin -go 1.18 +go 1.19 require ( cloud.google.com/go/iam v0.3.0 @@ -13,10 +13,10 @@ require ( github.com/cloudevents/sdk-go/v2 v2.8.0 github.com/coreos/go-oidc v2.2.1+incompatible github.com/evanphx/json-patch v4.12.0+incompatible - github.com/flyteorg/flyteidl v1.1.19 - github.com/flyteorg/flyteplugins v1.0.10 - github.com/flyteorg/flytepropeller v1.1.28 - github.com/flyteorg/flytestdlib v1.0.5 + github.com/flyteorg/flyteidl v1.3.14 + github.com/flyteorg/flyteplugins v1.0.40 + github.com/flyteorg/flytepropeller v1.1.70 + github.com/flyteorg/flytestdlib v1.0.15 github.com/flyteorg/stow v0.3.6 github.com/ghodss/yaml v1.0.0 github.com/go-gormigrate/gormigrate/v2 v2.0.0 @@ -32,12 +32,12 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/gtank/cryptopasta v0.0.0-20170601214702-1f550f6f2f69 - github.com/jackc/pgconn v1.10.1 + github.com/jackc/pgconn v1.13.0 github.com/lestrrat-go/jwx v1.1.6 github.com/magiconair/properties v1.8.6 github.com/mitchellh/mapstructure v1.4.3 - github.com/ory/fosite v0.39.0 - github.com/ory/x v0.0.162 + github.com/ory/fosite v0.42.2 + github.com/ory/x v0.0.214 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.12.1 github.com/prometheus/client_model v0.2.0 @@ -45,16 +45,16 @@ require ( github.com/sendgrid/sendgrid-go v3.10.0+incompatible github.com/spf13/cobra v1.4.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.7.2 + github.com/stretchr/testify v1.8.0 golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 google.golang.org/api v0.76.0 google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46 google.golang.org/grpc v1.46.0 google.golang.org/protobuf v1.28.0 - gorm.io/driver/postgres v1.2.3 + gorm.io/driver/postgres v1.4.5 gorm.io/driver/sqlite v1.1.1 - gorm.io/gorm v1.22.4 + gorm.io/gorm v1.24.1-0.20221019064659-5dd2bb482755 k8s.io/api v0.24.1 k8s.io/apimachinery v0.24.1 k8s.io/client-go v0.24.1 @@ -89,11 +89,10 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v3 v3.0.0 // indirect github.com/dgraph-io/ristretto v0.0.3 // indirect - github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/eapache/go-resiliency v1.2.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect - github.com/emicklei/go-restful v2.9.6+incompatible // indirect + 
github.com/emicklei/go-restful/v3 v3.8.0 // indirect github.com/fatih/color v1.13.0 // indirect github.com/felixge/httpsnoop v1.0.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect @@ -101,6 +100,7 @@ require ( github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.19.5 // indirect github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-sql-driver/mysql v1.7.0 // indirect github.com/go-test/deep v1.0.7 // indirect github.com/goccy/go-json v0.4.8 // indirect github.com/gofrs/uuid v4.2.0+incompatible // indirect @@ -118,13 +118,13 @@ require ( github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgproto3/v2 v2.2.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.1 // indirect github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect - github.com/jackc/pgtype v1.9.0 // indirect - github.com/jackc/pgx/v4 v4.14.0 // indirect + github.com/jackc/pgtype v1.12.0 // indirect + github.com/jackc/pgx/v4 v4.17.2 // indirect github.com/jcmturner/gofork v1.0.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect - github.com/jinzhu/now v1.1.4 // indirect + github.com/jinzhu/now v1.1.5 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -146,7 +146,7 @@ require ( github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/ncw/swift v1.0.53 // indirect - github.com/ory/go-acc v0.2.5 // indirect + github.com/ory/go-acc v0.2.6 // indirect github.com/ory/go-convenience v0.1.0 // indirect github.com/ory/viper v1.7.5 // indirect github.com/pborman/uuid v1.2.0 // indirect @@ -163,17 +163,17 @@ require ( github.com/spf13/cast v1.4.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/viper v1.11.0 // indirect - github.com/stretchr/objx v0.3.0 // indirect + github.com/stretchr/objx v0.4.0 // indirect github.com/subosito/gotenv v1.2.0 // indirect go.opencensus.io v0.23.0 // indirect - golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect + golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/net v0.0.0-20220607020251-c690dde0001d // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68 // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/tools v0.1.11 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/term v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect + golang.org/x/tools v0.1.12 // indirect golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -182,11 +182,12 @@ require ( gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect gopkg.in/jcmturner/gokrb5.v7 v7.5.0 // indirect gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect - gopkg.in/square/go-jose.v2 v2.5.1 // indirect + gopkg.in/square/go-jose.v2 v2.5.2-0.20210529014059-a5c7eec3c614 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + 
gorm.io/driver/mysql v1.4.4 // indirect k8s.io/apiextensions-apiserver v0.24.1 // indirect - sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 // indirect + sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) @@ -201,10 +202,10 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.19.1 // indirect - k8s.io/klog/v2 v2.60.1 // indirect - k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect - k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect + k8s.io/klog/v2 v2.70.1 // indirect + k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect + k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) replace github.com/robfig/cron/v3 => github.com/unionai/cron/v3 v3.0.2-0.20210825070134-bfc34418fe84 diff --git a/go.sum b/go.sum index aff85441b..f2fe7c70b 100644 --- a/go.sum +++ b/go.sum @@ -17,7 +17,6 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.66.0/go.mod h1:dgqGAjKCDxyhGTtC9dAREQGUJpkceNm1yt590Qno0Ko= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= @@ -68,56 +67,38 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.12.0/go.mod h1:fFLk2dp2oAhDz8QFKwqrjdJvxSp/W2g7nillojlL5Ho= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.22.0 h1:NUV0NNp9nkBuW66BFRLuMgldN60C57ET3dhbwLIYio8= cloud.google.com/go/storage v1.22.0/go.mod h1:GbaLEoMqbVm6sx3Z0R++gSiBlgMv6yUi2q1DeGFKQgE= contrib.go.opencensus.io/exporter/stackdriver v0.13.1/go.mod h1:z2tyTZtPmQ2HvWH4cOmVDgtY+1lomfKdbLnkJvZdc8c= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v62.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v63.4.0+incompatible h1:fle3M5Q7vr8auaiPffKyUQmLbvYeqpw30bKU6PrWJFo= github.com/Azure/azure-sdk-for-go v63.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.23.1 h1:3CVsSo4mp8NDWO11tHzN/mdo2zP0CtaSK5IcwBjfqRA= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.23.1/go.mod h1:w5pDIZuawUmY3Bj4tVx3Xb8KS96ToB0j315w9rqpAg0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.14.0 h1:NVS/4LOQfkBpk+B1VopIzv1ptmYeEskA8w/3K/w7vjo= -github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= 
github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.2 h1:Px2KVERcYEg2Lv25AqC2hVr0xUWaq94wuEObLIkYzmA= github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.2/go.mod h1:CdSJQNNzZhCkwDaV27XV1w48ZBPtxe7mlrZAsPNxD5g= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.0 h1:0nJeKDmB7a1a8RDMjTltahlPsaNlWjq/LpkZleSwINk= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.0/go.mod h1:mbwxKc/fW+IkF0GG591MuXw0KuEQBDkeRoZ9vmVJPxg= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= -github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= -github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= 
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c= @@ -127,7 +108,6 @@ github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q github.com/DataDog/datadog-go v3.4.1+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v4.0.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/opencensus-go-exporter-datadog v0.0.0-20191210083620-6965a1cfed68/go.mod h1:gMGUEe16aZh0QN941HgDjwrdjU4iTthPoz2/AtDRADE= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= @@ -144,6 +124,7 @@ github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8 github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= @@ -156,8 +137,6 @@ github.com/Shopify/sarama v1.26.4 h1:+17TxUq/PJEAfZAll0T7XJjSgQWCpaQSoki/x5yN8o8 github.com/Shopify/sarama v1.26.4/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -168,29 +147,22 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/apache/thrift v0.12.0/go.mod 
h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.23.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.23.19/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.31.3/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.37.1/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.2 h1:5VBk5r06bgxgRKVaUtm1/4NT/rtrnH2E4cnAYv5zgQc= github.com/aws/aws-sdk-go v1.44.2/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-xray-sdk-go v0.9.4/go.mod h1:XtMKdBQfpVut+tJEwI7+dJFRxxRdxHDyVNp2tHXRq04= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= @@ -208,7 +180,6 @@ github.com/bmatcuk/doublestar/v2 v2.0.3/go.mod h1:QMmcs3H2AUQICWhfzLXz+IYln8lRQm github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737 h1:rRISKWyXfVxvoa702s91Zl5oREZTrR3yv+tXrrX7G/g= github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= @@ 
-221,12 +192,10 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= -github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudevents/sdk-go/binding/format/protobuf/v2 v2.8.0 h1:hRguaVL9rVsO8PMOpKSZ5gYZ2kjGRCvuKw4yMlfsBtg= github.com/cloudevents/sdk-go/binding/format/protobuf/v2 v2.8.0/go.mod h1:Ba4CS2d+naAK8tGd6nm5ftGIWuHim+1lryAaIxhuh1k= @@ -248,12 +217,12 @@ github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMe github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/cockroachdb/cockroach-go v0.0.0-20190925194419-606b3d062051/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/cockroachdb/cockroach-go v0.0.0-20200312223839-f565e4789405/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= @@ -268,14 +237,11 @@ github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjs github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd 
v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= @@ -295,18 +261,17 @@ github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXh github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI= github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v17.12.0-ce-rc1.0.20201201034508-7d75c1d40d88+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v0.0.0-20180713052910-9f541cc9db5d/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -318,15 +283,14 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 
h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20181003060214-f58a169a71a5/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.6+incompatible h1:tfrHha8zJ01ywiOEC1miGY8st1/igzWB8OmvPgoYX7w= -github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= +github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -338,30 +302,24 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ernesto-jimenez/gogen v0.0.0-20180125220232-d7d4131e6607/go.mod h1:Cg4fM0vhYWOZdgM7RIOSTRNIc8/VT7CXClC3Ni86lu4= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flyteorg/flyteidl v1.1.19 h1:1CtSbuFhFHwUbKdv66PqbcER01iacAJU+snh0eTsXc4= -github.com/flyteorg/flyteidl v1.1.19/go.mod h1:SLTYz2JgIKvM5MbPVlMP7uILb65fnuuZQZFHHIEYh2U= 
-github.com/flyteorg/flyteplugins v1.0.10 h1:XBycM4aOSE/WlI8iP9vqogKGXy4FMfVCUUfzxJus/p4= -github.com/flyteorg/flyteplugins v1.0.10/go.mod h1:GfbmRByI/rSatm/Epoj3bNyrXwIQ9NOXTVwLS6Z0p84= -github.com/flyteorg/flytepropeller v1.1.28 h1:68qQ0QRHoCzagF0oifkW/c4A1L4B4LdgyHCPLKMiY2g= -github.com/flyteorg/flytepropeller v1.1.28/go.mod h1:QE3szUWkFnyFg3mMxpn3y93ZSs18T+1SQtVgNhcEMvA= -github.com/flyteorg/flytestdlib v1.0.0/go.mod h1:QSVN5wIM1lM9d60eAEbX7NwweQXW96t5x4jbyftn89c= -github.com/flyteorg/flytestdlib v1.0.5 h1:80A/vfpAJl+pgU6vxccbsYApZPrvyGhOIsCAFngsjnk= -github.com/flyteorg/flytestdlib v1.0.5/go.mod h1:WTe0k3DmmrKFjj3hwiIbjjdCK89X63MBzBbXhQ4Yxf0= -github.com/flyteorg/stow v0.3.3/go.mod h1:HBld7ud0i4khMHwJjkO8v+NSP7ddKa/ruhf4I8fliaA= +github.com/flyteorg/flyteidl v1.3.14 h1:o5M0g/r6pXTPu5PEurbYxbQmuOu3hqqsaI2M6uvK0N8= +github.com/flyteorg/flyteidl v1.3.14/go.mod h1:Pkt2skI1LiHs/2ZoekBnyPhuGOFMiuul6HHcKGZBsbM= +github.com/flyteorg/flyteplugins v1.0.40 h1:RTsYingqmqr13qBbi4CB2ArXDHNHUOkAF+HTLJQiQ/s= +github.com/flyteorg/flyteplugins v1.0.40/go.mod h1:qyUPqVspLcLGJpKxVwHDWf+kBpOGuItOxCaF6zAmDio= +github.com/flyteorg/flytepropeller v1.1.70 h1:/d1qqz13rdVADM85ST70eerAdBstJJz9UUB/mNSZi0w= +github.com/flyteorg/flytepropeller v1.1.70/go.mod h1:MezHUJmgPzm4Pu8nIy6LLiEkxNA6buTQ7hInSqCViTY= +github.com/flyteorg/flytestdlib v1.0.15 h1:kv9jDQmytbE84caY+pkZN8trJU2ouSAmESzpTEhfTt0= +github.com/flyteorg/flytestdlib v1.0.15/go.mod h1:ghw/cjY0sEWIIbyCtcJnL/Gt7ZS7gf9SUi0CCPhbz3s= github.com/flyteorg/stow v0.3.6 h1:jt50ciM14qhKBaIrB+ppXXY+SXB59FNREFgTJqCyqIk= github.com/flyteorg/stow v0.3.6/go.mod h1:5dfBitPM004dwaZdoVylVjxFT4GWAgI0ghAndhNUzCo= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= @@ -370,8 +328,6 @@ github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/frankban/quicktest v1.10.0 h1:Gfh+GAJZOAoKZsIZeZbdn2JF10kN1XHNvjsvQK8gVkE= @@ -382,9 +338,10 @@ github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWp github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= 
github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -393,39 +350,88 @@ github.com/go-gormigrate/gormigrate/v2 v2.0.0 h1:e2A3Uznk4viUC4UuemuVgsNnvYZyOA8 github.com/go-gormigrate/gormigrate/v2 v2.0.0/go.mod h1:YuVJ+D/dNt4HWrThTBnjgZuRbt7AuwINeg4q52ZE3Jw= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.0/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 
h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= +github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= +github.com/go-openapi/runtime v0.19.26/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/swag 
v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= +github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= +github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M= github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/attrs v0.1.0/go.mod h1:fmNpaWyHM0tRm8gCZWKx8yY9fvaNLo2PyzBNSrBZ5Hw= github.com/gobuffalo/buffalo v0.12.8-0.20181004233540-fac9bb505aa8/go.mod h1:sLyT7/dceRXJUxSsE813JTQtA3Eb1vjxWfo/N//vXIY= github.com/gobuffalo/buffalo v0.13.0/go.mod h1:Mjn1Ba9wpIbpbrD+lIDMy99pQ0H0LiddMIIDGse7qT4= @@ -445,6 +451,8 @@ github.com/gobuffalo/buffalo-plugins v1.10.0/go.mod h1:4osg8d9s60txLuGwXnqH+RCjP github.com/gobuffalo/buffalo-plugins v1.11.0/go.mod h1:rtIvAYRjYibgmWhnjKmo7OadtnxuMG5ZQLr25ozAzjg= github.com/gobuffalo/buffalo-plugins v1.15.0/go.mod h1:BqSx01nwgKUQr/MArXzFkSD0QvdJidiky1OKgyfgrK8= github.com/gobuffalo/buffalo-pop v1.0.5/go.mod h1:Fw/LfFDnSmB/vvQXPvcXEjzP98Tc+AudyNWUBWKCwQ8= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= github.com/gobuffalo/envy v1.6.4/go.mod h1:Abh+Jfw475/NWtYMEt+hnJWRiC8INKWibIMyNt1w2Mc= github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= github.com/gobuffalo/envy v1.6.6/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= @@ -472,6 +480,7 @@ github.com/gobuffalo/events v1.3.1/go.mod h1:9JOkQVoyRtailYVE/JJ2ZQ/6i4gTjM5t2Hs github.com/gobuffalo/events v1.4.1/go.mod h1:SjXgWKpeSuvQDvGhgMz5IXx3Czu+IbL+XPLR41NvVQY= github.com/gobuffalo/fizz v1.0.12/go.mod h1:C0sltPxpYK8Ftvf64kbsQa2yiCZY4RZviurNxXdAKwc= github.com/gobuffalo/fizz v1.9.8/go.mod h1:w1FEn1yKNVCc49KnADGyYGRPH7jFON3ak4Bj1yUudHo= +github.com/gobuffalo/fizz v1.10.0/go.mod h1:J2XGPO0AfJ1zKw7+2BA+6FEGAkyEsdCOLvN93WCT2WI= github.com/gobuffalo/flect v0.0.0-20180907193754-dc14d8acaf9f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= github.com/gobuffalo/flect v0.0.0-20181002182613-4571df4b1daf/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= github.com/gobuffalo/flect v0.0.0-20181007231023-ae7ed6bfe683/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= @@ -483,6 +492,9 @@ 
github.com/gobuffalo/flect v0.0.0-20181114183036-47375f6d8328/go.mod h1:0HvNbHdf github.com/gobuffalo/flect v0.0.0-20181210151238-24a2b68e0316/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= github.com/gobuffalo/flect v0.0.0-20190117212819-a62e61d96794/go.mod h1:397QT6v05LkZkn07oJXXT6y9FCfwC8Pug0WA2/2mE9k= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gobuffalo/flect v0.2.1/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= @@ -509,6 +521,10 @@ github.com/gobuffalo/genny v0.0.0-20181207164119-84844398a37d/go.mod h1:y0ysCHGG github.com/gobuffalo/genny v0.0.0-20181211165820-e26c8466f14d/go.mod h1:sHnK+ZSU4e2feXP3PA29ouij6PUEiN+RCwECjCTB3yM= github.com/gobuffalo/genny v0.0.0-20190104222617-a71664fc38e7/go.mod h1:QPsQ1FnhEsiU8f+O0qKWXz2RE4TiDqLVChWkBuh1WaY= github.com/gobuffalo/genny v0.0.0-20190112155932-f31a84fcacf5/go.mod h1:CIaHCrSIuJ4il6ka3Hub4DR4adDrGoXGEEt2FbBxoIo= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= github.com/gobuffalo/genny v0.2.0/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= github.com/gobuffalo/genny v0.3.0/go.mod h1:ywJ2CoXrTZj7rbS8HTbzv7uybnLKlsNSBhEQ+yFI3E8= github.com/gobuffalo/genny v0.6.0/go.mod h1:Vigx9VDiNscYpa/LwrURqGXLSIbzTfapt9+K6gF1kTA= @@ -518,6 +534,9 @@ github.com/gobuffalo/github_flavored_markdown v1.0.4/go.mod h1:uRowCdK+q8d/RF0Kt github.com/gobuffalo/github_flavored_markdown v1.0.5/go.mod h1:U0643QShPF+OF2tJvYNiYDLDGDuQmJZXsf/bHOJPsMY= github.com/gobuffalo/github_flavored_markdown v1.0.7/go.mod h1:w93Pd9Lz6LvyQXEG6DktTPHkOtCbr+arAD5mkwMzXLI= github.com/gobuffalo/github_flavored_markdown v1.1.0/go.mod h1:TSpTKWcRTI0+v7W3x8dkSKMLJSUpuVitlptCkpeY8ic= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= github.com/gobuffalo/gogen v0.2.0/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= github.com/gobuffalo/helpers v0.2.2/go.mod h1:xYbzUdCUpVzLwLnqV8HIjT6hmG0Cs7YIBCJkNM597jw= github.com/gobuffalo/helpers v0.2.4/go.mod h1:NX7v27yxPDOPTgUFYmJ5ow37EbxdoLraucOGvMNawyk= @@ -577,6 +596,7 @@ github.com/gobuffalo/packd v0.0.0-20181114190715-f25c5d2471d7/go.mod h1:Yf2toFaI github.com/gobuffalo/packd v0.0.0-20181124090624-311c6248e5fb/go.mod h1:Foenia9ZvITEvG05ab6XpiD5EfBHPL8A6hush8SJ0o8= github.com/gobuffalo/packd v0.0.0-20181207120301-c49825f8f6f4/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= github.com/gobuffalo/packd v0.0.0-20181212173646-eca3b8fd6687/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= 
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= github.com/gobuffalo/packd v0.2.0/go.mod h1:k2CkHP3bjbqL2GwxwhxUy1DgnlbW644hkLC9iIUvZwY= github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= @@ -596,9 +616,12 @@ github.com/gobuffalo/packr/v2 v2.0.0-rc.12/go.mod h1:FV1zZTsVFi1DSCboO36Xgs4pzCZ github.com/gobuffalo/packr/v2 v2.0.0-rc.13/go.mod h1:2Mp7GhBFMdJlOK8vGfl7SYtfMP3+5roE39ejlfjw0rA= github.com/gobuffalo/packr/v2 v2.0.0-rc.14/go.mod h1:06otbrNvDKO1eNQ3b8hst+1010UooI2MFg+B2Ze4MV8= github.com/gobuffalo/packr/v2 v2.0.0-rc.15/go.mod h1:IMe7H2nJvcKXSF90y4X1rjYIRlNMJYCxEhssBXNZwWs= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/packr/v2 v2.4.0/go.mod h1:ra341gygw9/61nSjAbfwcwh8IrYL4WmR4IsPkPBhQiY= github.com/gobuffalo/packr/v2 v2.5.2/go.mod h1:sgEE1xNZ6G0FNN5xn9pevVu4nywaxHvgup67xisti08= github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kETFqjFoFlOc= +github.com/gobuffalo/packr/v2 v2.8.0/go.mod h1:PDk2k3vGevNE3SwVyVRgQCCXETC9SaONCNSXT1Q8M1g= github.com/gobuffalo/plush v3.7.16+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= github.com/gobuffalo/plush v3.7.20+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= github.com/gobuffalo/plush v3.7.21+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= @@ -620,6 +643,7 @@ github.com/gobuffalo/pop v4.8.3+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVD github.com/gobuffalo/pop v4.8.4+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= github.com/gobuffalo/pop v4.13.1+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= github.com/gobuffalo/pop/v5 v5.0.11/go.mod h1:mZJHJbA3cy2V18abXYuVop2ldEJ8UZ2DK6qOekC5u5g= +github.com/gobuffalo/pop/v5 v5.3.1/go.mod h1:vcEDhh6cJ3WVENqJDFt/6z7zNb7lLnlN8vj3n5G9rYA= github.com/gobuffalo/release v1.0.35/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= github.com/gobuffalo/release v1.0.38/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= github.com/gobuffalo/release v1.0.42/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= @@ -684,7 +708,6 @@ github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2/go.mod h1:xEhNfoBDX1hz github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -765,7 +788,6 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -774,7 +796,6 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/readahead v0.0.0-20161222183148-eaceba169032/go.mod h1:qYysrqQXuV4tzsizt4oOQ6mrBZQ0xnQXP3ylXX8Jk5Y= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -789,7 +810,6 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0 github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0 h1:nRJtk3y8Fm770D42QV6T90ZnvFZyk7agSo3Q+Z9p3WI= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= @@ -809,7 +829,6 @@ github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyC github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.1.2/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= github.com/gorilla/sessions v1.1.3/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -817,28 +836,25 @@ github.com/gotestyourself/gotestyourself v1.3.0/go.mod h1:zZKM6oeNM8k+FRljX1mnzV github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod 
h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/gtank/cryptopasta v0.0.0-20170601214702-1f550f6f2f69 h1:7xsUJsB2NrdcttQPa7JLEaGzvdbk7KvfrjgHZXOQRo0= github.com/gtank/cryptopasta v0.0.0-20170601214702-1f550f6f2f69/go.mod h1:YLEMZOtU+AZ7dhN9T/IpGhXVGly2bvkJQ+zxj3WeVQo= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -846,7 +862,6 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -859,7 +874,6 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/memberlist v0.1.3/go.mod 
h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -867,7 +881,7 @@ github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/inhies/go-bytesize v0.0.0-20201103132853-d0aed0d254f8/go.mod h1:KrtyD5PFj++GKkFS/7/RRrfnRhAMGQwy75GLCHWrCNs= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= @@ -880,12 +894,13 @@ github.com/jackc/pgconn v1.3.2/go.mod h1:LvCquS3HbBKwgl7KbX9KyqEIumJAbm1UMcTvGaI github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgconn v1.6.0/go.mod h1:yeseQo4xhQbgyJs2c87RAXOH2i624N0Fh1KSPJya7qo= github.com/jackc/pgconn v1.6.4/go.mod h1:w2pne1C2tZgP+TvjqLpOigGzNqjBgQW9dUw/4Chex78= github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.10.1 h1:DzdIHIjG1AxGwoEEqS+mGsURyjt4enSmqzACXvVzOT8= -github.com/jackc/pgconn v1.10.1/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys= +github.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= @@ -903,8 +918,8 @@ github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwX github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.2.0 h1:r7JypeP2D3onoQTCxWdTpCtJ4D+qpKr0TxvoyMhZ5ns= -github.com/jackc/pgproto3/v2 v2.2.0/go.mod 
h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y= +github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= @@ -912,12 +927,13 @@ github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01C github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= +github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik= github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= github.com/jackc/pgtype v1.4.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.9.0 h1:/SH1RxEtltvJgsDqp3TbiTFApD3mey3iygpuEGeuBXk= -github.com/jackc/pgtype v1.9.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w= +github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= @@ -925,29 +941,31 @@ github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9 github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 v4.4.1/go.mod h1:6iSW+JznC0YT+SgBn7rNxoEBsBgSmnC5FwyCekOGUiE= github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= +github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oAlxAg= github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= github.com/jackc/pgx/v4 v4.8.1/go.mod h1:4HOLxrl8wToZJReD04/yB20GDwf4KBYETvlHciCnwW0= github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.14.0 h1:TgdrmgnM7VY72EuSQzBbBd4JA1RLqJolrw9nQVZABVc= -github.com/jackc/pgx/v4 v4.14.0/go.mod h1:jT3ibf/A0ZVCp89rtCIN0zCJxcE74ypROmHEZYsG/j8= +github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E= +github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle 
v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.2.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jandelgado/gcov2lcov v1.0.4-0.20210120124023-b83752c6dc08/go.mod h1:NnSxK6TMlg1oGDBfGelGbjgorT5/L3cchlbtgFYZSss= github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jinzhu/now v1.1.2/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jinzhu/now v1.1.3/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jinzhu/now v1.1.4 h1:tHnRBy1i5F2Dh8BAFxqFzxKqqvezXrL2OW1TnX+Mlas= github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= +github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -966,8 +984,6 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -982,10 +998,13 @@ github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= github.com/karrick/godirwalk v1.7.7/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= github.com/karrick/godirwalk v1.7.8/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= 
github.com/karrick/godirwalk v1.10.9/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/karrick/godirwalk v1.15.3/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/karrick/godirwalk v1.15.5/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= @@ -993,9 +1012,11 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA= github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/knadh/koanf v0.14.1-0.20201201075439-e0853799f9ec/go.mod h1:H5mEFsTeWizwFXHKtsITL5ipsLTuAMQoGuQpp+1JL9U= github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1003,9 +1024,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= @@ -1036,23 +1056,25 @@ github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= 
github.com/luna-duclos/instrumentedsql v0.0.0-20181127104832-b7d587d28109/go.mod h1:PWUIzhtavmOR965zfawVsHXbEuU1G29BPZ/CB3C7jXk= github.com/luna-duclos/instrumentedsql v1.1.2/go.mod h1:4LGbEqDnopzNAiyxPPDXhLspyunZxgPTMJBKtC6U0BQ= +github.com/luna-duclos/instrumentedsql v1.1.3/go.mod h1:9J1njvFds+zN7y85EDhN9XNQLANWwZt2ULeIC8yMNYs= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/deplist v1.0.4/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= github.com/markbates/deplist v1.0.5/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= github.com/markbates/deplist v1.1.3/go.mod h1:BF7ioVzAJYEtzQN/os4rt8H8Ti3h0T7EoN+7eyALktE= +github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= github.com/markbates/going v1.0.2/go.mod h1:UWCk3zm0UKefHZ7l8BNqi26UyiEMniznk8naLdTcy6c= github.com/markbates/grift v1.0.4/go.mod h1:wbmtW74veyx+cgfwFhlnnMWqhoz55rnHR47oMXzsyVs= github.com/markbates/hmax v1.0.0/go.mod h1:cOkR9dktiESxIMu+65oc/r/bdY4bE8zZw3OLhLx0X2c= @@ -1076,7 +1098,6 @@ github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcncea github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= @@ -1090,7 +1111,6 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= 
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= @@ -1121,6 +1141,7 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -1133,6 +1154,8 @@ github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3P github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/monoculum/formam v0.0.0-20180901015400-4e68be1d79ba/go.mod h1:RKgILGEJq24YyJ2ban8EO0RUVSJlF1pGsEvoLEACr/Q= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/moul/http2curl v0.0.0-20170919181001-9ac6cf4d929b/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -1140,27 +1163,18 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/ncw/swift v1.0.49/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/ncw/swift v1.0.53 h1:luHjjTNtekIEvHg5KdAFIBaH7bWfNkefwFnpDffSIks= github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e 
h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/oleiade/reflections v1.0.0 h1:0ir4pc6v8/PJ0yw5AEtMddfXpWBXg9cnG7SgSoJuCgY= github.com/oleiade/reflections v1.0.0/go.mod h1:RbATFBbKYkVdqmSFtx13Bb/tVhR0lgOBXunWTZKeL4w= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM= +github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1168,10 +1182,10 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.9.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1181,30 +1195,28 @@ github.com/onsi/gomega v1.6.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod 
h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.1-0.20190913142402-a7454ce5950e/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/ory/analytics-go/v4 v4.0.0/go.mod h1:FMx9cLRD9xN+XevPvZ5FDMfignpmcqPP6FUKnJ9/MmE= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/ory/dockertest/v3 v3.5.4/go.mod h1:J8ZUbNB2FOhm1cFZW9xBpDsODqsSWcyYgtJYVPcnF70= +github.com/ory/dockertest/v3 v3.6.3/go.mod h1:EFLcVUOl8qCwp9NyDAcCDtq/QviLtYswW/VbWzUnTNE= github.com/ory/fosite v0.29.0/go.mod h1:0atSZmXO7CAcs6NPMI/Qtot8tmZYj04Nddoold4S2h0= -github.com/ory/fosite v0.39.0 h1:u1Ct/ME7XYzREvufr7ehBIdq/KatjVLIYg/ABqWzprw= -github.com/ory/fosite v0.39.0/go.mod h1:37r59qkOSPueYKmaA7EHiXrDMF1B+XPN+MgkZgTRg3Y= +github.com/ory/fosite v0.42.2 h1:fKfGAgMmmeM1C0DXCyt5TOzQWrKmLOL+PApEC4bIv2o= +github.com/ory/fosite v0.42.2/go.mod h1:qggrqm3ZWQF9i2f/d3RLH5mHHPtv44hsiltkVKLsCYo= github.com/ory/go-acc v0.0.0-20181118080137-ddc355013f90/go.mod h1:sxnvPCxChFuSmTJGj8FdMupeq1BezCiEpDjTUXQ4hf4= -github.com/ory/go-acc v0.2.5 h1:31irXHzG2vnKQSE4weJm7AdfrnpaVjVCq3nD7viXCJE= -github.com/ory/go-acc v0.2.5/go.mod h1:4Kb/UnPcT8qRAk3IAxta+hvVapdxTLWtrr7bFLlEgpw= +github.com/ory/go-acc v0.2.6 h1:YfI+L9dxI7QCtWn2RbawqO0vXhiThdXu/RgizJBbaq0= +github.com/ory/go-acc v0.2.6/go.mod h1:4Kb/UnPcT8qRAk3IAxta+hvVapdxTLWtrr7bFLlEgpw= github.com/ory/go-convenience v0.1.0 h1:zouLKfF2GoSGnJwGq+PE/nJAE6dj2Zj5QlTgmMTsTS8= github.com/ory/go-convenience v0.1.0/go.mod h1:uEY/a60PL5c12nYz4V5cHY03IBmwIAEm8TWB0yn9KNs= github.com/ory/gojsonreference v0.0.0-20190720135523-6b606c2d8ee8/go.mod h1:wsH1C4nIeeQClDtD5AH7kF1uTS6zWyqfjVDTmB0Em7A= @@ -1212,6 +1224,7 @@ github.com/ory/gojsonschema v1.1.1-0.20190919112458-f254ca73d5e9/go.mod h1:BNZpd github.com/ory/herodot v0.6.2/go.mod h1:3BOneqcyBsVybCPAJoi92KN2BpJHcmDqAMcAAaJiJow= github.com/ory/herodot v0.7.0/go.mod h1:YXKOfAXYdQojDP5sD8m0ajowq3+QXNdtxA+QiUXBwn0= github.com/ory/herodot v0.8.3/go.mod h1:rvLjxOAlU5omtmgjCfazQX2N82EpMfl3BytBWc1jjsk= +github.com/ory/herodot v0.9.2/go.mod h1:Da2HXR8mpwPbPrH+Gv9qV8mM5gI3v+PoJ69BA4l2RAk= github.com/ory/jsonschema/v3 v3.0.1/go.mod h1:jgLHekkFk0uiGdEWGleC+tOm6JSSP8cbf17PnBuGXlw= github.com/ory/viper v1.5.6/go.mod h1:TYmpFpKLxjQwvT4f0QPpkOn4sDXU1kDgAwJpgLYiQ28= github.com/ory/viper v1.7.4/go.mod h1:T6sodNZKNGPpashUOk7EtXz2isovz8oCd57GNVkkNmE= @@ -1220,9 +1233,9 @@ github.com/ory/viper v1.7.5/go.mod h1:ypOuyJmEUb3oENywQZRgeAMwqgOyDqwboO1tj3DjTa github.com/ory/x v0.0.84/go.mod h1:RXLPBG7B+hAViONVg0sHwK+U/ie1Y/NeXrq1JcARfoE= github.com/ory/x v0.0.93/go.mod 
h1:lfcTaGXpTZs7IEQAW00r9EtTCOxD//SiP5uWtNiz31g= github.com/ory/x v0.0.110/go.mod h1:DJfkE3GdakhshNhw4zlKoRaL/ozg/lcTahA9OCih2BE= -github.com/ory/x v0.0.162 h1:xE/UBmmMlInTvlgGXUyo+VeZAcWU5gyWb/xh6jmBWsI= -github.com/ory/x v0.0.162/go.mod h1:sj3z/MeCrAyNFFTfN6yK1nTmHXGSFnw+QwIIQ/Rowec= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/ory/x v0.0.127/go.mod h1:FwUujfFuCj5d+xgLn4fGMYPnzriR5bdAIulFXMtnK0M= +github.com/ory/x v0.0.214 h1:nz5ijvm5MVhYxWsQSuUrW1hj9F5QLZvPn/nLo5s06T4= +github.com/ory/x v0.0.214/go.mod h1:aRl57gzyD4GF0HQCekovXhv0xTZgAgiht3o8eVhsm9Q= github.com/parnurzeal/gorequest v0.2.15/go.mod h1:3Kh2QUMJoqw3icWAecsyzkpY7UzRfDhbRdTjtNwNiUE= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= @@ -1230,17 +1243,16 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= -github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.0-beta.8 h1:dy81yyLYJDwMTifq24Oi/IslOslRrDSb3jwDggjz3Z0= github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= @@ -1253,7 +1265,6 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/sftp v1.10.0/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ 
-1261,45 +1272,32 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021 h1:0XM1XL/OFFJjXsYXlG30spTkV/E9+gmd5GD1w2HE8xM= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs 
v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= @@ -1309,6 +1307,7 @@ github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqn github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rhnvrm/simples3 v0.5.0/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.0.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1327,11 +1326,11 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/santhosh-tekuri/jsonschema/v2 v2.1.0/go.mod h1:yzJzKUGV4RbWqWIBBP4wSOBqavX5saE02yirLS0OTyg= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seatgeek/logrus-gelf-formatter v0.0.0-20210219220335-367fa274be2c/go.mod h1:/THDZYi7F/BsVEcYzYPqdcWFQ+1C2InkawTKfLOAnzg= github.com/segmentio/analytics-go v3.0.1+incompatible/go.mod h1:C7CYBtQWk4vRk2RyLu0qOcbHJ18E3F1HV2C/8JvKN48= github.com/segmentio/analytics-go v3.1.0+incompatible/go.mod h1:C7CYBtQWk4vRk2RyLu0qOcbHJ18E3F1HV2C/8JvKN48= github.com/segmentio/backo-go v0.0.0-20160424052352-204274ad699c/go.mod h1:kJ9mm9YmoWSkk+oQ+5Cj8DEoRCX2JT6As4kEtIIOp1M= @@ -1363,6 +1362,7 @@ github.com/sirupsen/logrus v1.1.0/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8 github.com/sirupsen/logrus 
v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= @@ -1375,7 +1375,6 @@ github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:X github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= @@ -1384,30 +1383,28 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.2.0/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.3.2/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.5.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.2-0.20200723214538-8d17101741c8/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/jwalterweatherman 
v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -1417,19 +1414,17 @@ github.com/spf13/viper v1.3.1/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.11.0 h1:7OX/1FS6n7jHD1zGrZTM7WtY13ZELRyosK4k93oPr44= github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk= github.com/sqs/goreturns v0.0.0-20181028201513-538ac6014518/go.mod h1:CKI4AZ4XmGV240rTHfO0hfE83S6/a3/Q1siZJ/vXf7A= +github.com/square/go-jose/v3 v3.0.0-20200630053402-0a67ce9b0693/go.mod h1:6hSY48PjDm4UObWmGLyJE9DxYVKTgR9kbCspXXJEhcU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As= -github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1437,17 +1432,22 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify 
v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.1.1/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= +github.com/tidwall/gjson v1.6.8/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI= +github.com/tidwall/gjson v1.7.1/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/sjson v1.0.4/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y= +github.com/tidwall/sjson v1.1.5/go.mod h1:VuJzsZnTowhSxWdOgsAnb886i4AjEyTkk7tNtsL7EYE= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= @@ -1461,12 +1461,11 @@ github.com/unionai/cron/v3 v3.0.2-0.20210825070134-bfc34418fe84 h1:EompdlTtH1Gbc github.com/unionai/cron/v3 v3.0.2-0.20210825070134-bfc34418fe84/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/unrolled/secure v0.0.0-20180918153822-f340ee86eb8b/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= github.com/unrolled/secure v0.0.0-20181005190816-ff9db2ff917f/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= @@ -1485,9 +1484,7 @@ go.elastic.co/apm/module/apmhttp v1.8.0/go.mod h1:9LPFlEON51/lRbnWDfqAWErihIiAFD go.elastic.co/apm/module/apmot v1.8.0/go.mod h1:Q5Xzabte8G/fkvDjr1jlDuOSUt9hkVWNZEHh6ZNaTjI= go.elastic.co/fastjson v1.0.0/go.mod 
h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= @@ -1498,8 +1495,10 @@ go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46O go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= @@ -1507,21 +1506,24 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib v0.18.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.13.0/go.mod h1:TwTkyRaTam1pOIb2wxcAiC2hkMVbokXkt6DEt5nDkD8= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.18.0/go.mod h1:iK1G0FgHurSJ/aYLg5LpnPI0pqdanM73S3dhyDp0Lk4= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY= +go.opentelemetry.io/otel v0.18.0/go.mod h1:PT5zQj4lTsR1YeARt8YNKcFb88/c2IKoSABK9mX0r78= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/metric v0.18.0/go.mod 
h1:kEH2QtzAyBy3xDVQfGZKIcok4ZZFvd5xyKPfPcuK6pE= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.18.0/go.mod h1:NyierCU3/G8DLTva7KRzGii2fdxdR89zXKH1bNWY7Bo= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.18.0/go.mod h1:FzdUu3BPwZSZebfQ1vl5/tAa8LyMLXSJN57AXIt/iDk= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1565,13 +1567,16 @@ golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190102171810-8d7daa0c54b3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1594,8 +1599,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto 
v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1658,9 +1663,9 @@ golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181207154023-610586996380/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1687,10 +1692,10 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -1704,8 +1709,6 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net 
v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1713,8 +1716,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d h1:4SFsTMi4UahlKoloni7L4eYzhFRifURQLw+yv0QDCx8= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181003184128-c57b0facaced/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1724,7 +1727,6 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210126194326-f9ce19ea3013/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= @@ -1741,14 +1743,16 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1768,7 +1772,6 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181106135930-3a76605856fd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1776,12 +1779,15 @@ golang.org/x/sys v0.0.0-20190116161447-11f53e031339/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1796,7 +1802,6 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1818,16 +1823,13 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1862,12 +1864,13 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68 h1:z8Hj/bl9cOV2grsOpEaQFUaly0JWN3i97mo3jXKJNp0= -golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1876,21 +1879,18 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181003024731-2f84ea8ef872/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181006002542-f60d9635b16a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1922,13 +1922,18 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190613204242-ed0dc450797f/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1964,6 +1969,7 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200308013534-11ec41452d41/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1973,13 +1979,10 @@ golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200721223218-6123e77877b2/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200828161849-5deb26317202/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools 
v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20200918232735-d647fc253266/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1995,9 +1998,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY= -golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2014,7 +2016,6 @@ gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6d gonum.org/v1/netlib v0.0.0-20191229114700-bbb4dff026f8/go.mod h1:2IgXn/sJaRbePPBA1wRj8OE+QLvVaH0q8SK6TSTKlnk= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= gonum.org/v1/plot v0.0.0-20200111075622-4abb28f724d5/go.mod h1:+HbaZVpsa73UwN7kXGCECULRHovLRJjH+t5cFPgxErs= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -2033,11 +2034,8 @@ google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.31.0/go.mod h1:CL+9IBCa2WWU6gRuBWaKqGWLFFwbEUXkfeMkHLQWYWo= -google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.38.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= @@ -2073,7 +2071,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn 
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190708153700-3bdd9d9f5532/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= @@ -2102,11 +2099,9 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200831141814-d751682dd103/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200914193844-75d14daec038/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200921151605-7abf4a1a14d5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2114,7 +2109,6 @@ google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210302174412-5ede27ff9881/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2159,7 +2153,6 @@ google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46 
h1:G1IeWbjrqEq9ChWxEuRPJu6laA67+XgTFHVSAvepr38= google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -2178,7 +2171,6 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= @@ -2197,6 +2189,7 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11 google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc/examples v0.0.0-20210304020650-930c79186c99/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2220,13 +2213,10 @@ gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/mold.v2 v2.2.0/go.mod h1:XMyyRsGtakkDPbxXbrA5VODo6bUXyvoDjLd5l3T0XoA= @@ -2238,7 
+2228,6 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.55.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= @@ -2252,18 +2241,16 @@ gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlI gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= -gopkg.in/kothar/go-backblaze.v0 v0.0.0-20190520213052-702d4e7eb465/go.mod h1:zJ2QpyDCYo1KvLXlmdnFlQAyF/Qfth0fB8239Qg7BIE= gopkg.in/mail.v2 v2.0.0-20180731213649-a0242b2233b4/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.1.9/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.2-0.20210529014059-a5c7eec3c614 h1:lwJmuuJQGclcankpPJwh8rorzB0bNbVALv8phDGh8TQ= +gopkg.in/square/go-jose.v2 v2.5.2-0.20210529014059-a5c7eec3c614/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/validator.v2 v2.0.0-20180514200540-135c24b11c19/go.mod h1:o4V0GXN9/CAmCsvJ0oXYZvrZOe7syiDZSN1GWGZTGzc= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -2276,30 +2263,31 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/mysql v1.0.1 h1:omJoilUzyrAp0xNoio88lGJCroGdIOen9hq2A/+3ifw= gorm.io/driver/mysql v1.0.1/go.mod h1:KtqSthtg55lFp3S5kUXqlGaelnWpKitn4k1xZTnoiPw= +gorm.io/driver/mysql v1.4.4 h1:MX0K9Qvy0Na4o7qSC/YI7XxqUw5KDw01umqgID+svdQ= +gorm.io/driver/mysql v1.4.4/go.mod h1:BCg8cKI+R0j/rZRQxeKis/forqRwRSYOR8OM3Wo6hOM= gorm.io/driver/postgres v1.0.0/go.mod h1:wtMFcOzmuA5QigNsgEIb7O5lhvH1tHAF1RbWmLWV4to= -gorm.io/driver/postgres v1.2.3 h1:f4t0TmNMy9gh3TU2PX+EppoA6YsgFnyq8Ojtddb42To= -gorm.io/driver/postgres v1.2.3/go.mod h1:pJV6RgYQPG47aM1f0QeOzFH9HxQc8JcmAgjRCgS0wjs= +gorm.io/driver/postgres v1.4.5 h1:mTeXTTtHAgnS9PgmhN2YeUbazYpLhUI1doLnw42XUZc= +gorm.io/driver/postgres v1.4.5/go.mod h1:GKNQYSJ14qvWkvPwXljMGehpKrhlDNsqYRr5HnYGncg= gorm.io/driver/sqlite v1.1.1 h1:qtWqNAEUyi7gYSUAJXeiAMz0lUOdakZF5ia9Fqnp5G4= gorm.io/driver/sqlite v1.1.1/go.mod h1:hm2olEcl8Tmsc6eZyxYSeznnsDaMqamBvEXLNtBg4cI= gorm.io/driver/sqlserver v1.0.2 h1:FzxAlw0/7hntMzSiNfotpYCo9Lz8dqWQGdmCGqIiFGo= gorm.io/driver/sqlserver v1.0.2/go.mod h1:gb0Y9QePGgqjzrVyTQUZeh9zkd5v0iz71cM1B4ZycEY= gorm.io/gorm v1.9.19/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gorm.io/gorm v1.20.0/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= -gorm.io/gorm v1.22.3/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= -gorm.io/gorm v1.22.4 h1:8aPcyEJhY0MAt8aY6Dc524Pn+pO29K+ydu+e/cXSpQM= -gorm.io/gorm v1.22.4/go.mod h1:1aeVC+pe9ZmvKZban/gW4QPra7PRoTEssyc922qCAkk= +gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= +gorm.io/gorm v1.24.1-0.20221019064659-5dd2bb482755 h1:7AdrbfcvKnzejfqP5g37fdSZOXH/JvaPIzBIHTOqXKk= +gorm.io/gorm v1.24.1-0.20221019064659-5dd2bb482755/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2308,39 +2296,32 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -k8s.io/api v0.0.0-20210217171935-8e2decd92398/go.mod h1:60tmSUpHxGPFerNHbo/ayI2lKxvtrhbxFyXuEIWJd78= -k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8= k8s.io/api v0.24.1 h1:BjCMRDcyEYz03joa3K1+rbshwh1Ay6oB53+iUx2H8UY= k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ= k8s.io/apiextensions-apiserver v0.24.1 h1:5yBh9+ueTq/kfnHQZa0MAo6uNcPrtxPMpNQgorBaKS0= k8s.io/apiextensions-apiserver v0.24.1/go.mod h1:A6MHfaLDGfjOc/We2nM7uewD5Oa/FnEbZ6cD7g2ca4Q= -k8s.io/apimachinery v0.0.0-20210217011835-527a61b4dffe/go.mod h1:Z7ps/g0rjlTeMstYrMOUttJfT2Gg34DEaG/f2PYLCWY= 
-k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I= k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apiserver v0.24.1/go.mod h1:dQWNMx15S8NqJMp0gpYfssyvhYnkilc1LpExd/dkLh0= -k8s.io/client-go v0.0.0-20210217172142-7279fc64d847/go.mod h1:q0EaghmVye2uui19vxSZ2NG6ssgUWgjudO6vrwXneSI= k8s.io/client-go v0.24.1 h1:w1hNdI9PFrzu3OlovVeTnf4oHDt+FJLd9Ndluvnb42E= k8s.io/client-go v0.24.1/go.mod h1:f1kIDqcEYmwXS/vTbbhopMUbhKp2JhOeVTfxgaCIlF8= k8s.io/code-generator v0.24.1/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= k8s.io/component-base v0.24.1 h1:APv6W/YmfOWZfo+XJ1mZwep/f7g7Tpwvdbo9CQLDuts= k8s.io/component-base v0.24.1/go.mod h1:DW5vQGYVCog8WYpNob3PMmmsY8A3L9QZNg4j/dV3s38= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= +k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= +k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= +k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= @@ -2354,14 +2335,12 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lR sigs.k8s.io/controller-runtime v0.12.1 h1:4BJY01xe9zKQti8oRjj/NeHKRXthf1YkYJAgLONFFoI= sigs.k8s.io/controller-runtime v0.12.1/go.mod h1:BKhxlA4l7FPK4AQcsuL4X6vZeWnKDXez/vp1Y8dxTU0= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/json 
v0.0.0-20220525155127-227cbc7cc124 h1:2sgAQQcY0dEW2SsQwTXhQV4vO6+rSslYx8K3XmM5hqQ= -sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/pkg/async/cloudevent/factory.go b/pkg/async/cloudevent/factory.go index e7b7368f1..a43d8749b 100644 --- a/pkg/async/cloudevent/factory.go +++ b/pkg/async/cloudevent/factory.go @@ -63,15 +63,19 @@ func NewCloudEventsPublisher(ctx context.Context, config runtimeInterfaces.Cloud return cloudEventImplementations.NewCloudEventsPublisher(&cloudEventImplementations.PubSubSender{Pub: publisher}, scope, config.EventsPublisherConfig.EventTypes) case cloudEventImplementations.Kafka: saramaConfig := sarama.NewConfig() - saramaConfig.Version = config.KafkaConfig.Version + var err error + saramaConfig.Version, err = sarama.ParseKafkaVersion(config.KafkaConfig.Version) + if err != nil { + logger.Fatalf(ctx, "failed to parse kafka version, %v", err) + panic(err) + } sender, err := kafka_sarama.NewSender(config.KafkaConfig.Brokers, saramaConfig, config.EventsPublisherConfig.TopicName) if err != nil { panic(err) } - defer sender.Close(ctx) client, err := cloudevents.NewClient(sender, cloudevents.WithTimeNow(), cloudevents.WithUUIDs()) if err != nil { - logger.Fatalf(ctx, "failed to create client, %v", err) + logger.Fatalf(ctx, "failed to create kafka client, %v", err) panic(err) } return cloudEventImplementations.NewCloudEventsPublisher(&cloudEventImplementations.KafkaSender{Client: client}, scope, config.EventsPublisherConfig.EventTypes) diff --git a/pkg/async/cloudevent/factory_test.go b/pkg/async/cloudevent/factory_test.go index a9b66c38a..00ac69380 100644 --- a/pkg/async/cloudevent/factory_test.go +++ b/pkg/async/cloudevent/factory_test.go @@ -56,7 +56,10 @@ func TestInvalidKafkaConfig(t *testing.T) { Enable: true, Type: implementations.Kafka, EventsPublisherConfig: runtimeInterfaces.EventsPublisherConfig{TopicName: "topic"}, + KafkaConfig: runtimeInterfaces.KafkaConfig{Version: "0.8.2.0"}, } NewCloudEventsPublisher(context.Background(), cfg, promutils.NewTestScope()) + cfg.KafkaConfig = runtimeInterfaces.KafkaConfig{Version: "2.1.0"} + NewCloudEventsPublisher(context.Background(), cfg, promutils.NewTestScope()) t.Errorf("did not panic") } diff --git 
a/pkg/async/cloudevent/implementations/cloudevent_publisher_test.go b/pkg/async/cloudevent/implementations/cloudevent_publisher_test.go index 25ae0190b..3da13d9d3 100644 --- a/pkg/async/cloudevent/implementations/cloudevent_publisher_test.go +++ b/pkg/async/cloudevent/implementations/cloudevent_publisher_test.go @@ -79,7 +79,9 @@ var nodeRequest = &admin.NodeExecutionEventRequest{ Id: &nodeExecutionID, OccurredAt: occurredAtProto, Phase: core.NodeExecution_RUNNING, - InputUri: "input uri", + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: "input uri", + }, }, } diff --git a/pkg/async/notifications/implementations/event_publisher_test.go b/pkg/async/notifications/implementations/event_publisher_test.go index 56503a673..f4960cf2b 100644 --- a/pkg/async/notifications/implementations/event_publisher_test.go +++ b/pkg/async/notifications/implementations/event_publisher_test.go @@ -66,7 +66,9 @@ var nodeRequest = &admin.NodeExecutionEventRequest{ Id: &nodeExecutionID, OccurredAt: occurredAtProto, Phase: core.NodeExecution_RUNNING, - InputUri: "input uri", + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: "input uri", + }, }, } diff --git a/pkg/clusterresource/controller.go b/pkg/clusterresource/controller.go index 7e85e7188..348d61c5e 100644 --- a/pkg/clusterresource/controller.go +++ b/pkg/clusterresource/controller.go @@ -2,6 +2,7 @@ package clusterresource import ( "context" + "crypto/md5" // #nosec "encoding/json" "fmt" "io/ioutil" @@ -10,56 +11,46 @@ import ( "path/filepath" "runtime/debug" "strings" - "time" - - impl2 "github.com/flyteorg/flyteadmin/pkg/clusterresource/impl" - "github.com/flyteorg/flyteadmin/pkg/config" - "github.com/flyteorg/flyteadmin/pkg/executioncluster/impl" - "github.com/flyteorg/flyteadmin/pkg/manager/impl/resources" - "github.com/flyteorg/flyteadmin/pkg/repositories" - errors2 "github.com/flyteorg/flyteadmin/pkg/repositories/errors" - admin2 "github.com/flyteorg/flyteidl/clients/go/admin" + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - - "github.com/flyteorg/flyteadmin/pkg/clusterresource/interfaces" - "github.com/flyteorg/flyteadmin/pkg/executioncluster" - executionclusterIfaces "github.com/flyteorg/flyteadmin/pkg/executioncluster/interfaces" - "github.com/flyteorg/flyteadmin/pkg/runtime" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + k8sruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/yaml" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/jsonmergepatch" "k8s.io/apimachinery/pkg/util/mergepatch" "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/client-go/kubernetes/scheme" - - "k8s.io/apimachinery/pkg/api/meta" - k8sruntime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer/yaml" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/discovery" "k8s.io/client-go/discovery/cached/memory" "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/restmapper" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - + impl2 "github.com/flyteorg/flyteadmin/pkg/clusterresource/impl" + "github.com/flyteorg/flyteadmin/pkg/clusterresource/interfaces" "github.com/flyteorg/flyteadmin/pkg/common" - 
"github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" - - "k8s.io/apimachinery/pkg/util/wait" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/flyteorg/flytestdlib/logger" - "google.golang.org/grpc/codes" - + "github.com/flyteorg/flyteadmin/pkg/config" "github.com/flyteorg/flyteadmin/pkg/errors" + "github.com/flyteorg/flyteadmin/pkg/executioncluster" + "github.com/flyteorg/flyteadmin/pkg/executioncluster/impl" + executionclusterIfaces "github.com/flyteorg/flyteadmin/pkg/executioncluster/interfaces" + "github.com/flyteorg/flyteadmin/pkg/manager/impl/resources" + "github.com/flyteorg/flyteadmin/pkg/repositories" + errors2 "github.com/flyteorg/flyteadmin/pkg/repositories/errors" + "github.com/flyteorg/flyteadmin/pkg/runtime" runtimeInterfaces "github.com/flyteorg/flyteadmin/pkg/runtime/interfaces" + admin2 "github.com/flyteorg/flyteidl/clients/go/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytestdlib/logger" "github.com/flyteorg/flytestdlib/promutils" - k8serrors "k8s.io/apimachinery/pkg/api/errors" ) const namespaceVariable = "namespace" @@ -92,8 +83,8 @@ type controllerMetrics struct { type FileName = string type NamespaceName = string -type LastModTimeCache = map[FileName]time.Time -type NamespaceCache = map[NamespaceName]LastModTimeCache +type TemplateChecksums = map[FileName][16]byte +type NamespaceCache = map[NamespaceName]TemplateChecksums type templateValuesType = map[string]string @@ -108,19 +99,28 @@ type controller struct { listTargets executionclusterIfaces.ListTargetsInterface } -func (c *controller) templateAlreadyApplied(namespace NamespaceName, templateFile os.FileInfo) bool { +// templateAlreadyApplied checks if there is an applied template with the same checksum +func (c *controller) templateAlreadyApplied(namespace NamespaceName, templateFilename string, checksum [16]byte) bool { namespacedAppliedTemplates, ok := c.appliedTemplates[namespace] if !ok { // There is no record of this namespace altogether. return false } - timestamp, ok := namespacedAppliedTemplates[templateFile.Name()] + appliedChecksum, ok := namespacedAppliedTemplates[templateFilename] if !ok { // There is no record of this file having ever been applied. return false } - // The applied template file could have been modified, in which case we will need to apply it once more. - return timestamp.Equal(templateFile.ModTime()) + // Check if the applied template matches the new one + return appliedChecksum == checksum +} + +// setTemplateChecksum records the latest checksum for the template file +func (c *controller) setTemplateChecksum(namespace NamespaceName, templateFilename string, checksum [16]byte) { + if _, ok := c.appliedTemplates[namespace]; !ok { + c.appliedTemplates[namespace] = make(TemplateChecksums) + } + c.appliedTemplates[namespace][templateFilename] = checksum } // Given a map of templatized variable names -> data source, this function produces an output that maps the same @@ -298,23 +298,21 @@ func (c *controller) syncNamespace(ctx context.Context, project *admin.Project, continue } - if c.templateAlreadyApplied(namespace, templateFile) { - // nothing to do. 
- logger.Debugf(ctx, "syncing namespace [%s]: templateFile [%s] already applied, nothing to do.", namespace, templateFile.Name()) - continue - } - - // 1) create resource from template: + // 1) create resource from template and check if already applied k8sManifest, err := c.createResourceFromTemplate(ctx, templateDir, templateFileName, project, domain, namespace, templateValues, customTemplateValues) if err != nil { collectedErrs = append(collectedErrs, err) continue } - // 2) create the resource on the kubernetes cluster and cache successful outcomes - if _, ok := c.appliedTemplates[namespace]; !ok { - c.appliedTemplates[namespace] = make(LastModTimeCache) + checksum := md5.Sum([]byte(k8sManifest)) // #nosec + if c.templateAlreadyApplied(namespace, templateFileName, checksum) { + // nothing to do. + logger.Debugf(ctx, "syncing namespace [%s]: templateFile [%s] already applied, nothing to do.", namespace, templateFile.Name()) + continue } + + // 2) create the resource on the kubernetes cluster and cache successful outcomes for _, target := range c.listTargets.GetValidTargets() { dynamicObj, err := prepareDynamicCreate(*target, k8sManifest) if err != nil { @@ -382,7 +380,7 @@ func (c *controller) syncNamespace(ctx context.Context, project *admin.Project, logger.Debugf(ctx, "Successfully updated resource [%+v] in namespace [%s]", dynamicObj.obj.GetKind(), namespace) - c.appliedTemplates[namespace][templateFile.Name()] = templateFile.ModTime() + c.setTemplateChecksum(namespace, templateFileName, checksum) } else { // Some error other than AlreadyExists was raised when we tried to Create the k8s object. c.metrics.KubernetesResourcesCreateErrors.Inc() @@ -397,7 +395,7 @@ func (c *controller) syncNamespace(ctx context.Context, project *admin.Project, logger.Debugf(ctx, "Created resource [%+v] for namespace [%s] in kubernetes", dynamicObj.obj.GetKind(), namespace) c.metrics.KubernetesResourcesCreated.Inc() - c.appliedTemplates[namespace][templateFile.Name()] = templateFile.ModTime() + c.setTemplateChecksum(namespace, templateFileName, checksum) } } } @@ -649,7 +647,7 @@ func NewClusterResourceController(adminDataProvider interfaces.FlyteAdminDataPro listTargets: listTargets, poller: make(chan struct{}), metrics: newMetrics(scope), - appliedTemplates: make(map[string]map[string]time.Time), + appliedTemplates: make(map[string]TemplateChecksums), } } diff --git a/pkg/clusterresource/controller_test.go b/pkg/clusterresource/controller_test.go index 35ab8db4f..c8594851f 100644 --- a/pkg/clusterresource/controller_test.go +++ b/pkg/clusterresource/controller_test.go @@ -2,10 +2,10 @@ package clusterresource import ( "context" + "crypto/md5" // #nosec "io/ioutil" "os" "testing" - "time" "github.com/flyteorg/flyteadmin/pkg/errors" "google.golang.org/grpc/codes" @@ -27,57 +27,23 @@ const domain = "domain-bar" var testScope = mockScope.NewTestScope() -type mockFileInfo struct { - name string - modTime time.Time -} - -func (i *mockFileInfo) Name() string { - return i.name -} - -func (i *mockFileInfo) Size() int64 { - return 0 -} - -func (i *mockFileInfo) Mode() os.FileMode { - return os.ModeExclusive -} - -func (i *mockFileInfo) ModTime() time.Time { - return i.modTime -} - -func (i *mockFileInfo) IsDir() bool { - return false -} - -func (i *mockFileInfo) Sys() interface{} { - return nil -} - func TestTemplateAlreadyApplied(t *testing.T) { const namespace = "namespace" const fileName = "fileName" - var lastModifiedTime = time.Now() testController := controller{ metrics: newMetrics(testScope), } - mockFile 
:= mockFileInfo{ - name: fileName, - modTime: lastModifiedTime, - } - assert.False(t, testController.templateAlreadyApplied(namespace, &mockFile)) - - testController.appliedTemplates = make(map[string]map[string]time.Time) - testController.appliedTemplates[namespace] = make(map[string]time.Time) - assert.False(t, testController.templateAlreadyApplied(namespace, &mockFile)) + checksum1 := md5.Sum([]byte("template1")) // #nosec + checksum2 := md5.Sum([]byte("template2")) // #nosec + assert.False(t, testController.templateAlreadyApplied(namespace, fileName, checksum1)) - testController.appliedTemplates[namespace][fileName] = lastModifiedTime.Add(-10 * time.Minute) - assert.False(t, testController.templateAlreadyApplied(namespace, &mockFile)) + testController.appliedTemplates = make(map[string]TemplateChecksums) + testController.setTemplateChecksum(namespace, fileName, checksum1) + assert.True(t, testController.templateAlreadyApplied(namespace, fileName, checksum1)) + assert.False(t, testController.templateAlreadyApplied(namespace, fileName, checksum2)) - testController.appliedTemplates[namespace][fileName] = lastModifiedTime - assert.True(t, testController.templateAlreadyApplied(namespace, &mockFile)) + testController.setTemplateChecksum(namespace, fileName, checksum2) + assert.True(t, testController.templateAlreadyApplied(namespace, fileName, checksum2)) } func TestPopulateTemplateValues(t *testing.T) { diff --git a/pkg/common/entity.go b/pkg/common/entity.go index 10e64decd..65d3161cf 100644 --- a/pkg/common/entity.go +++ b/pkg/common/entity.go @@ -17,6 +17,7 @@ const ( NamedEntity = "nen" NamedEntityMetadata = "nem" Project = "p" + Signal = "s" ) // ResourceTypeToEntity maps a resource type to an entity suitable for use with Database filters diff --git a/pkg/common/mocks/storage.go b/pkg/common/mocks/storage.go index 9edc5c1ed..d1555ebf4 100644 --- a/pkg/common/mocks/storage.go +++ b/pkg/common/mocks/storage.go @@ -24,7 +24,8 @@ type TestDataStore struct { ctx context.Context, reference storage.DataReference, opts storage.Options, msg proto.Message) error ConstructReferenceCb func( ctx context.Context, reference storage.DataReference, nestedKeys ...string) (storage.DataReference, error) - Store map[storage.DataReference][]byte + DeleteCb func(ctx context.Context, reference storage.DataReference) error + Store map[storage.DataReference][]byte } func (t *TestDataStore) Head(ctx context.Context, reference storage.DataReference) (storage.Metadata, error) { @@ -77,6 +78,10 @@ func (t *TestDataStore) ConstructReference( return storage.DataReference(fmt.Sprintf("%s/%v", reference, nestedPath)), nil } +func (t *TestDataStore) Delete(ctx context.Context, reference storage.DataReference) error { + return t.DeleteCb(ctx, reference) +} + func GetMockStorageClient() *storage.DataStore { mockStorageClient := TestDataStore{ Store: make(map[storage.DataReference][]byte), diff --git a/pkg/config/config.go b/pkg/config/config.go index 5d5bc3dbc..28996bd23 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -25,8 +25,9 @@ type ServerConfig struct { // Deprecated: please use auth.AppAuth.ThirdPartyConfig instead. 
DeprecatedThirdPartyConfig authConfig.ThirdPartyConfigOptions `json:"thirdPartyConfig" pflag:",Deprecated please use auth.appAuth.thirdPartyConfig instead."` - DataProxy DataProxyConfig `json:"dataProxy" pflag:",Defines data proxy configuration."` - ReadHeaderTimeoutSeconds int `json:"readHeaderTimeoutSeconds" pflag:",The amount of time allowed to read request headers."` + DataProxy DataProxyConfig `json:"dataProxy" pflag:",Defines data proxy configuration."` + ReadHeaderTimeoutSeconds int `json:"readHeaderTimeoutSeconds" pflag:",The amount of time allowed to read request headers."` + KubeClientConfig KubeClientConfig `json:"kubeClientConfig" pflag:",Configuration to control the Kubernetes client"` } type DataProxyConfig struct { @@ -51,6 +52,18 @@ type GrpcConfig struct { MaxMessageSizeBytes int `json:"maxMessageSizeBytes" pflag:",The max size in bytes for incoming gRPC messages"` } +// KubeClientConfig contains the configuration used by flyteadmin to configure its internal Kubernetes Client. +type KubeClientConfig struct { + // QPS indicates the maximum QPS to the master from this client. + // If it's zero, the created RESTClient will use DefaultQPS: 5 + QPS int32 `json:"qps" pflag:",Max QPS to the master for requests to KubeAPI. 0 defaults to 5."` + // Maximum burst for throttle. + // If it's zero, the created RESTClient will use DefaultBurst: 10. + Burst int `json:"burst" pflag:",Max burst rate for throttle. 0 defaults to 10"` + // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout. + Timeout config.Duration `json:"timeout" pflag:",Max duration allowed for every request to KubeAPI before giving up. 0 implies no timeout."` +} + type ServerSecurityOptions struct { Secure bool `json:"secure"` Ssl SslOptions `json:"ssl"` @@ -97,6 +110,11 @@ var defaultServerConfig = &ServerConfig{ }, }, ReadHeaderTimeoutSeconds: 32, // just shy of requestTimeoutUpperBound + KubeClientConfig: KubeClientConfig{ + QPS: 100, + Burst: 25, + Timeout: config.Duration{Duration: 30 * time.Second}, + }, } var serverConfig = config.MustRegisterSection(SectionKey, defaultServerConfig) diff --git a/pkg/config/serverconfig_flags.go b/pkg/config/serverconfig_flags.go index 54d374c80..c37a82603 100755 --- a/pkg/config/serverconfig_flags.go +++ b/pkg/config/serverconfig_flags.go @@ -69,11 +69,15 @@ func (cfg ServerConfig) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.String(fmt.Sprintf("%v%v", prefix, "thirdPartyConfig.flyteClient.clientId"), defaultServerConfig.DeprecatedThirdPartyConfig.FlyteClientConfig.ClientID, "public identifier for the app which handles authorization for a Flyte deployment") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "thirdPartyConfig.flyteClient.redirectUri"), defaultServerConfig.DeprecatedThirdPartyConfig.FlyteClientConfig.RedirectURI, "This is the callback uri registered with the app which handles authorization for a Flyte deployment") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "thirdPartyConfig.flyteClient.scopes"), defaultServerConfig.DeprecatedThirdPartyConfig.FlyteClientConfig.Scopes, "Recommended scopes for the client to request.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "thirdPartyConfig.flyteClient.audience"), defaultServerConfig.DeprecatedThirdPartyConfig.FlyteClientConfig.Audience, "Audience to use when initiating OAuth2 authorization requests.") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "dataProxy.upload.maxSize"), defaultServerConfig.DataProxy.Upload.MaxSize.String(), "Maximum allowed upload size.") 
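A minimal sketch (not part of this patch; the struct and function names here are hypothetical) of how the new KubeClientConfig knobs above (QPS, Burst, Timeout, defaulting to 100/25/30s) end up on the client-go rest.Config, mirroring the flytek8s.GetRestClientConfig change later in this diff:

package main

import (
	"fmt"
	"time"

	restclient "k8s.io/client-go/rest"
)

// kubeClientConfig mirrors the fields added to ServerConfig above.
type kubeClientConfig struct {
	QPS     int32         // 0 falls back to client-go's DefaultQPS (5)
	Burst   int           // 0 falls back to client-go's DefaultBurst (10)
	Timeout time.Duration // 0 means no per-request timeout
}

// applyKubeClientConfig copies the admin-level throttling knobs onto a rest.Config
// before it is handed to kubernetes.NewForConfig or dynamic.NewForConfig.
func applyKubeClientConfig(rc *restclient.Config, cfg kubeClientConfig) {
	rc.QPS = float32(cfg.QPS)
	rc.Burst = cfg.Burst
	rc.Timeout = cfg.Timeout
}

func main() {
	rc := &restclient.Config{Host: "https://example-cluster:6443"} // placeholder endpoint
	applyKubeClientConfig(rc, kubeClientConfig{QPS: 100, Burst: 25, Timeout: 30 * time.Second})
	fmt.Printf("qps=%v burst=%v timeout=%v\n", rc.QPS, rc.Burst, rc.Timeout)
}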
cmdFlags.String(fmt.Sprintf("%v%v", prefix, "dataProxy.upload.maxExpiresIn"), defaultServerConfig.DataProxy.Upload.MaxExpiresIn.String(), "Maximum allowed expiration duration.") cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "dataProxy.upload.defaultFileNameLength"), defaultServerConfig.DataProxy.Upload.DefaultFileNameLength, "Default length for the generated file name if not provided in the request.") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "dataProxy.upload.storagePrefix"), defaultServerConfig.DataProxy.Upload.StoragePrefix, "Storage prefix to use for all upload requests.") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "dataProxy.download.maxExpiresIn"), defaultServerConfig.DataProxy.Download.MaxExpiresIn.String(), "Maximum allowed expiration duration.") cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "readHeaderTimeoutSeconds"), defaultServerConfig.ReadHeaderTimeoutSeconds, "The amount of time allowed to read request headers.") + cmdFlags.Int32(fmt.Sprintf("%v%v", prefix, "kubeClientConfig.qps"), defaultServerConfig.KubeClientConfig.QPS, "Max QPS to the master for requests to KubeAPI. 0 defaults to 5.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "kubeClientConfig.burst"), defaultServerConfig.KubeClientConfig.Burst, "Max burst rate for throttle. 0 defaults to 10") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "kubeClientConfig.timeout"), defaultServerConfig.KubeClientConfig.Timeout.String(), "Max duration allowed for every request to KubeAPI before giving up. 0 implies no timeout.") return cmdFlags } diff --git a/pkg/config/serverconfig_flags_test.go b/pkg/config/serverconfig_flags_test.go index c2493f110..b16e0416d 100755 --- a/pkg/config/serverconfig_flags_test.go +++ b/pkg/config/serverconfig_flags_test.go @@ -365,6 +365,20 @@ func TestServerConfig_SetFlags(t *testing.T) { } }) }) + t.Run("Test_thirdPartyConfig.flyteClient.audience", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("thirdPartyConfig.flyteClient.audience", testValue) + if vString, err := cmdFlags.GetString("thirdPartyConfig.flyteClient.audience"); err == nil { + testDecodeJson_ServerConfig(t, fmt.Sprintf("%v", vString), &actual.DeprecatedThirdPartyConfig.FlyteClientConfig.Audience) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) t.Run("Test_dataProxy.upload.maxSize", func(t *testing.T) { t.Run("Override", func(t *testing.T) { @@ -449,4 +463,46 @@ func TestServerConfig_SetFlags(t *testing.T) { } }) }) + t.Run("Test_kubeClientConfig.qps", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("kubeClientConfig.qps", testValue) + if vInt32, err := cmdFlags.GetInt32("kubeClientConfig.qps"); err == nil { + testDecodeJson_ServerConfig(t, fmt.Sprintf("%v", vInt32), &actual.KubeClientConfig.QPS) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_kubeClientConfig.burst", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("kubeClientConfig.burst", testValue) + if vInt, err := cmdFlags.GetInt("kubeClientConfig.burst"); err == nil { + testDecodeJson_ServerConfig(t, fmt.Sprintf("%v", vInt), &actual.KubeClientConfig.Burst) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_kubeClientConfig.timeout", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := defaultServerConfig.KubeClientConfig.Timeout.String() + + cmdFlags.Set("kubeClientConfig.timeout", testValue) + if vString, err := 
cmdFlags.GetString("kubeClientConfig.timeout"); err == nil { + testDecodeJson_ServerConfig(t, fmt.Sprintf("%v", vString), &actual.KubeClientConfig.Timeout) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) } diff --git a/pkg/errors/errors.go b/pkg/errors/errors.go index 30922ae02..bb14fbaf5 100644 --- a/pkg/errors/errors.go +++ b/pkg/errors/errors.go @@ -8,6 +8,7 @@ import ( "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" "github.com/flyteorg/flytestdlib/logger" + "github.com/golang/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -16,10 +17,9 @@ type FlyteAdminError interface { Error() string Code() codes.Code GRPCStatus() *status.Status - WithDetails(details *admin.EventFailureReason) (FlyteAdminError, error) + WithDetails(details proto.Message) (FlyteAdminError, error) String() string } - type flyteAdminErrorImpl struct { status *status.Status } @@ -43,7 +43,7 @@ func (e *flyteAdminErrorImpl) String() string { // enclose the error in the format that grpc server expect from golang: // // https://github.com/grpc/grpc-go/blob/master/status/status.go#L133 -func (e *flyteAdminErrorImpl) WithDetails(details *admin.EventFailureReason) (FlyteAdminError, error) { +func (e *flyteAdminErrorImpl) WithDetails(details proto.Message) (FlyteAdminError, error) { s, err := e.status.WithDetails(details) if err != nil { return nil, err @@ -107,3 +107,35 @@ func NewIncompatibleClusterError(ctx context.Context, errorMsg, curCluster strin } return statusErr } + +func NewWorkflowExistsDifferentStructureError(ctx context.Context, request *admin.WorkflowCreateRequest) FlyteAdminError { + errorMsg := "workflow with different structure already exists" + statusErr, transformationErr := NewFlyteAdminError(codes.InvalidArgument, errorMsg).WithDetails(&admin.CreateWorkflowFailureReason{ + Reason: &admin.CreateWorkflowFailureReason_ExistsDifferentStructure{ + ExistsDifferentStructure: &admin.WorkflowErrorExistsDifferentStructure{ + Id: request.Id, + }, + }, + }) + if transformationErr != nil { + logger.Panicf(ctx, "Failed to wrap grpc status in type 'Error': %v", transformationErr) + return NewFlyteAdminErrorf(codes.InvalidArgument, errorMsg) + } + return statusErr +} + +func NewWorkflowExistsIdenticalStructureError(ctx context.Context, request *admin.WorkflowCreateRequest) FlyteAdminError { + errorMsg := "workflow with identical structure already exists" + statusErr, transformationErr := NewFlyteAdminError(codes.AlreadyExists, errorMsg).WithDetails(&admin.CreateWorkflowFailureReason{ + Reason: &admin.CreateWorkflowFailureReason_ExistsIdenticalStructure{ + ExistsIdenticalStructure: &admin.WorkflowErrorExistsIdenticalStructure{ + Id: request.Id, + }, + }, + }) + if transformationErr != nil { + logger.Panicf(ctx, "Failed to wrap grpc status in type 'Error': %v", transformationErr) + return NewFlyteAdminErrorf(codes.AlreadyExists, errorMsg) + } + return statusErr +} diff --git a/pkg/errors/errors_test.go b/pkg/errors/errors_test.go index 16d6d99fc..ace659b0c 100644 --- a/pkg/errors/errors_test.go +++ b/pkg/errors/errors_test.go @@ -8,6 +8,7 @@ import ( "google.golang.org/grpc/status" "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" "github.com/stretchr/testify/assert" ) @@ -43,3 +44,49 @@ func TestNewIncompatibleClusterError(t *testing.T) { _, ok = details.GetReason().(*admin.EventFailureReason_IncompatibleCluster) assert.True(t, ok) } + +func TestNewWorkflowExistsDifferentStructureError(t *testing.T) { + 
wf := &admin.WorkflowCreateRequest{ + Id: &core.Identifier{ + ResourceType: core.ResourceType_WORKFLOW, + Project: "testProj", + Domain: "domain", + Name: "name", + Version: "ver", + }, + } + statusErr := NewWorkflowExistsDifferentStructureError(context.Background(), wf) + assert.NotNil(t, statusErr) + s, ok := status.FromError(statusErr) + assert.True(t, ok) + assert.Equal(t, codes.InvalidArgument, s.Code()) + assert.Equal(t, "workflow with different structure already exists", s.Message()) + + details, ok := s.Details()[0].(*admin.CreateWorkflowFailureReason) + assert.True(t, ok) + _, ok = details.GetReason().(*admin.CreateWorkflowFailureReason_ExistsDifferentStructure) + assert.True(t, ok) +} + +func TestNewWorkflowExistsIdenticalStructureError(t *testing.T) { + wf := &admin.WorkflowCreateRequest{ + Id: &core.Identifier{ + ResourceType: core.ResourceType_WORKFLOW, + Project: "testProj", + Domain: "domain", + Name: "name", + Version: "ver", + }, + } + statusErr := NewWorkflowExistsIdenticalStructureError(context.Background(), wf) + assert.NotNil(t, statusErr) + s, ok := status.FromError(statusErr) + assert.True(t, ok) + assert.Equal(t, codes.AlreadyExists, s.Code()) + assert.Equal(t, "workflow with identical structure already exists", s.Message()) + + details, ok := s.Details()[0].(*admin.CreateWorkflowFailureReason) + assert.True(t, ok) + _, ok = details.GetReason().(*admin.CreateWorkflowFailureReason_ExistsIdenticalStructure) + assert.True(t, ok) +} diff --git a/pkg/executioncluster/impl/cluster_execution_target_provider.go b/pkg/executioncluster/impl/cluster_execution_target_provider.go index 328a28881..ad6e95bb9 100644 --- a/pkg/executioncluster/impl/cluster_execution_target_provider.go +++ b/pkg/executioncluster/impl/cluster_execution_target_provider.go @@ -16,7 +16,7 @@ type clusterExecutionTargetProvider struct{} // Creates a new Execution target for a cluster based on config passed in. func (c *clusterExecutionTargetProvider) GetExecutionTarget(initializationErrorCounter prometheus.Counter, k8sCluster runtime.ClusterConfig) (*executioncluster.ExecutionTarget, error) { - kubeConf, err := flytek8s.GetRestClientConfigForCluster(k8sCluster) + kubeConf, err := flytek8s.GetRestClientConfig("", "", &k8sCluster) if err != nil { return nil, err } diff --git a/pkg/flytek8s/client.go b/pkg/flytek8s/client.go index e22c15a85..a5baabb08 100644 --- a/pkg/flytek8s/client.go +++ b/pkg/flytek8s/client.go @@ -5,6 +5,7 @@ import ( "context" "os" + "github.com/flyteorg/flyteadmin/pkg/config" "github.com/flyteorg/flyteadmin/pkg/errors" "google.golang.org/grpc/codes" @@ -37,33 +38,33 @@ func RemoteClusterConfig(host string, auth runtimeInterfaces.Auth) (*restclient. }, nil } -func GetRestClientConfigForCluster(cluster runtimeInterfaces.ClusterConfig) (*restclient.Config, error) { - kubeConfiguration, err := RemoteClusterConfig(cluster.Endpoint, cluster.Auth) - - if err != nil { - return nil, err - } - logger.Debugf(context.Background(), "successfully loaded kube configuration from %v", cluster) - return kubeConfiguration, nil -} - // Initializes a config using a variety of configurable or default fallback options that can be passed to a Kubernetes client on // initialization. 
-func GetRestClientConfig(kubeConfig, master string, +func GetRestClientConfig(kubeConfigPathString, master string, k8sCluster *runtimeInterfaces.ClusterConfig) (*restclient.Config, error) { var kubeConfiguration *restclient.Config var err error - if kubeConfig != "" { + kubeClientConfig := &config.GetConfig().KubeClientConfig + if kubeConfigPathString != "" { // ExpandEnv allows using $HOME in the path and it will automatically map to the right OS's user home - kubeConfigPath := os.ExpandEnv(kubeConfig) + kubeConfigPath := os.ExpandEnv(kubeConfigPathString) kubeConfiguration, err = clientcmd.BuildConfigFromFlags(master, kubeConfigPath) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "Error building kubeconfig: %v", err) } - logger.Debugf(context.Background(), "successfully loaded kube config from %s", kubeConfig) + logger.Debugf(context.Background(), "successfully loaded kube config from %s", kubeConfigPathString) } else if k8sCluster != nil { - return GetRestClientConfigForCluster(*k8sCluster) + kubeConfiguration, err = RemoteClusterConfig(k8sCluster.Endpoint, k8sCluster.Auth) + if err != nil { + return nil, err + } + logger.Debugf(context.Background(), "successfully loaded kube configuration from %v", k8sCluster) + + if k8sCluster.KubeClientConfig != nil { + logger.Debugf(context.Background(), "using rest config from remote cluster override for k8s cluster %s", k8sCluster.Name) + kubeClientConfig = k8sCluster.KubeClientConfig + } } else { kubeConfiguration, err = restclient.InClusterConfig() if err != nil { @@ -71,6 +72,13 @@ func GetRestClientConfig(kubeConfig, master string, } logger.Debug(context.Background(), "successfully loaded kube configuration from in cluster config") } + + if kubeClientConfig != nil { + kubeConfiguration.QPS = float32(kubeClientConfig.QPS) + kubeConfiguration.Burst = kubeClientConfig.Burst + kubeConfiguration.Timeout = kubeClientConfig.Timeout.Duration + } + return kubeConfiguration, nil } diff --git a/pkg/manager/impl/description_entity_manager.go b/pkg/manager/impl/description_entity_manager.go new file mode 100644 index 000000000..3dcd7ab3e --- /dev/null +++ b/pkg/manager/impl/description_entity_manager.go @@ -0,0 +1,120 @@ +package impl + +import ( + "context" + "strconv" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/flyteorg/flyteadmin/pkg/common" + + "github.com/flyteorg/flyteadmin/pkg/errors" + "github.com/flyteorg/flyteadmin/pkg/manager/impl/util" + "github.com/flyteorg/flyteadmin/pkg/manager/impl/validation" + "github.com/flyteorg/flyteadmin/pkg/manager/interfaces" + repoInterfaces "github.com/flyteorg/flyteadmin/pkg/repositories/interfaces" + "github.com/flyteorg/flyteadmin/pkg/repositories/transformers" + runtimeInterfaces "github.com/flyteorg/flyteadmin/pkg/runtime/interfaces" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytestdlib/contextutils" + "github.com/flyteorg/flytestdlib/logger" + "github.com/flyteorg/flytestdlib/promutils" + "google.golang.org/grpc/codes" +) + +type DescriptionEntityMetrics struct { + Scope promutils.Scope +} + +type DescriptionEntityManager struct { + db repoInterfaces.Repository + config runtimeInterfaces.Configuration + metrics DescriptionEntityMetrics +} + +func (d *DescriptionEntityManager) GetDescriptionEntity(ctx context.Context, request admin.ObjectGetRequest) ( + *admin.DescriptionEntity, error) { + if err := validation.ValidateDescriptionEntityGetRequest(request); err != nil { + logger.Errorf(ctx, "invalid request 
[%+v]: %v", request, err) + return nil, err + } + ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain) + return util.GetDescriptionEntity(ctx, d.db, *request.Id) +} + +func (d *DescriptionEntityManager) ListDescriptionEntity(ctx context.Context, request admin.DescriptionEntityListRequest) (*admin.DescriptionEntityList, error) { + // Check required fields + if err := validation.ValidateDescriptionEntityListRequest(request); err != nil { + return nil, err + } + ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain) + + if request.ResourceType == core.ResourceType_WORKFLOW { + ctx = contextutils.WithWorkflowID(ctx, request.Id.Name) + } else { + ctx = contextutils.WithTaskID(ctx, request.Id.Name) + } + + filters, err := util.GetDbFilters(util.FilterSpec{ + Project: request.Id.Project, + Domain: request.Id.Domain, + Name: request.Id.Name, + RequestFilters: request.Filters, + }, common.ResourceTypeToEntity[request.ResourceType]) + if err != nil { + logger.Error(ctx, "failed to get database filter") + return nil, err + } + var sortParameter common.SortParameter + if request.SortBy != nil { + sortParameter, err = common.NewSortParameter(*request.SortBy) + if err != nil { + return nil, err + } + } + offset, err := validation.ValidateToken(request.Token) + if err != nil { + return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, + "invalid pagination token %s for ListWorkflows", request.Token) + } + listDescriptionEntitiesInput := repoInterfaces.ListResourceInput{ + Limit: int(request.Limit), + Offset: offset, + InlineFilters: filters, + SortParameter: sortParameter, + } + output, err := d.db.DescriptionEntityRepo().List(ctx, listDescriptionEntitiesInput) + if err != nil { + logger.Debugf(ctx, "Failed to list workflows with [%+v] with err %v", request.Id, err) + return nil, err + } + descriptionEntityList, err := transformers.FromDescriptionEntityModels(output.Entities) + if err != nil { + logger.Errorf(ctx, + "Failed to transform workflow models [%+v] with err: %v", output.Entities, err) + return nil, err + } + var token string + if len(output.Entities) == int(request.Limit) { + token = strconv.Itoa(offset + len(output.Entities)) + } + return &admin.DescriptionEntityList{ + DescriptionEntities: descriptionEntityList, + Token: token, + }, nil +} + +func NewDescriptionEntityManager( + db repoInterfaces.Repository, + config runtimeInterfaces.Configuration, + scope promutils.Scope) interfaces.DescriptionEntityInterface { + + metrics := DescriptionEntityMetrics{ + Scope: scope, + } + return &DescriptionEntityManager{ + db: db, + config: config, + metrics: metrics, + } +} diff --git a/pkg/manager/impl/description_entity_manager_test.go b/pkg/manager/impl/description_entity_manager_test.go new file mode 100644 index 000000000..66cdcc4a9 --- /dev/null +++ b/pkg/manager/impl/description_entity_manager_test.go @@ -0,0 +1,146 @@ +package impl + +import ( + "context" + "testing" + + "github.com/flyteorg/flyteadmin/pkg/manager/impl/testutils" + "github.com/flyteorg/flyteadmin/pkg/repositories/interfaces" + repositoryMocks "github.com/flyteorg/flyteadmin/pkg/repositories/mocks" + runtimeInterfaces "github.com/flyteorg/flyteadmin/pkg/runtime/interfaces" + runtimeMocks "github.com/flyteorg/flyteadmin/pkg/runtime/mocks" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + mockScope "github.com/flyteorg/flytestdlib/promutils" + "github.com/stretchr/testify/assert" +) + +var 
descriptionEntityIdentifier = core.Identifier{ + ResourceType: core.ResourceType_WORKFLOW, + Project: project, + Domain: domain, + Name: name, + Version: version, +} + +var badDescriptionEntityIdentifier = core.Identifier{ + ResourceType: core.ResourceType_WORKFLOW, + Project: project, + Domain: domain, + Name: "", + Version: version, +} + +func getMockRepositoryForDETest() interfaces.Repository { + return repositoryMocks.NewMockRepository() +} + +func getMockConfigForDETest() runtimeInterfaces.Configuration { + mockConfig := runtimeMocks.NewMockConfigurationProvider( + testutils.GetApplicationConfigWithDefaultDomains(), nil, nil, nil, nil, nil) + return mockConfig +} + +func TestDescriptionEntityManager_Get(t *testing.T) { + repository := getMockRepositoryForDETest() + manager := NewDescriptionEntityManager(repository, getMockConfigForDETest(), mockScope.NewTestScope()) + + response, err := manager.GetDescriptionEntity(context.Background(), admin.ObjectGetRequest{ + Id: &descriptionEntityIdentifier, + }) + assert.NoError(t, err) + assert.NotNil(t, response) + + response, err = manager.GetDescriptionEntity(context.Background(), admin.ObjectGetRequest{ + Id: &badDescriptionEntityIdentifier, + }) + assert.Error(t, err) + assert.Nil(t, response) +} + +func TestDescriptionEntityManager_List(t *testing.T) { + repository := getMockRepositoryForDETest() + manager := NewDescriptionEntityManager(repository, getMockConfigForDETest(), mockScope.NewTestScope()) + + t.Run("failed to validate a request", func(t *testing.T) { + response, err := manager.ListDescriptionEntity(context.Background(), admin.DescriptionEntityListRequest{ + Id: &admin.NamedEntityIdentifier{ + Name: "flyte", + }, + }) + assert.Error(t, err) + assert.Nil(t, response) + }) + + t.Run("failed to sort description entity", func(t *testing.T) { + response, err := manager.ListDescriptionEntity(context.Background(), admin.DescriptionEntityListRequest{ + ResourceType: core.ResourceType_TASK, + Id: &admin.NamedEntityIdentifier{ + Name: "flyte", + Project: "project", + Domain: "domain", + }, + Limit: 1, + SortBy: &admin.Sort{Direction: 3}, + }) + assert.Error(t, err) + assert.Nil(t, response) + }) + + t.Run("failed to validate token", func(t *testing.T) { + response, err := manager.ListDescriptionEntity(context.Background(), admin.DescriptionEntityListRequest{ + ResourceType: core.ResourceType_TASK, + Id: &admin.NamedEntityIdentifier{ + Name: "flyte", + Project: "project", + Domain: "domain", + }, + Limit: 1, + Token: "hello", + }) + assert.Error(t, err) + assert.Nil(t, response) + }) + + t.Run("list description entities in the task", func(t *testing.T) { + response, err := manager.ListDescriptionEntity(context.Background(), admin.DescriptionEntityListRequest{ + ResourceType: core.ResourceType_TASK, + Id: &admin.NamedEntityIdentifier{ + Name: "flyte", + Project: "project", + Domain: "domain", + }, + Limit: 1, + }) + assert.NoError(t, err) + assert.NotNil(t, response) + }) + + t.Run("list description entities in the workflow", func(t *testing.T) { + response, err := manager.ListDescriptionEntity(context.Background(), admin.DescriptionEntityListRequest{ + ResourceType: core.ResourceType_WORKFLOW, + Id: &admin.NamedEntityIdentifier{ + Name: "flyte", + Project: "project", + Domain: "domain", + }, + Limit: 1, + }) + assert.NoError(t, err) + assert.NotNil(t, response) + }) + + t.Run("failed to get filter", func(t *testing.T) { + response, err := manager.ListDescriptionEntity(context.Background(), admin.DescriptionEntityListRequest{ + ResourceType: 
core.ResourceType_WORKFLOW, + Id: &admin.NamedEntityIdentifier{ + Name: "flyte", + Project: "project", + Domain: "domain", + }, + Filters: "wrong", + }) + assert.Error(t, err) + assert.Nil(t, response) + }) +} diff --git a/pkg/manager/impl/execution_manager.go b/pkg/manager/impl/execution_manager.go index f5ab8a0d2..4f4689715 100644 --- a/pkg/manager/impl/execution_manager.go +++ b/pkg/manager/impl/execution_manager.go @@ -14,8 +14,6 @@ import ( "github.com/flyteorg/flyteadmin/auth" - "k8s.io/apimachinery/pkg/api/resource" - "github.com/flyteorg/flyteadmin/pkg/manager/impl/resources" dataInterfaces "github.com/flyteorg/flyteadmin/pkg/data/interfaces" @@ -186,39 +184,6 @@ func (m *ExecutionManager) addPluginOverrides(ctx context.Context, executionID * return nil, nil } -type completeTaskResources struct { - Defaults runtimeInterfaces.TaskResourceSet - Limits runtimeInterfaces.TaskResourceSet -} - -func getTaskResourcesAsSet(ctx context.Context, identifier *core.Identifier, - resourceEntries []*core.Resources_ResourceEntry, resourceName string) runtimeInterfaces.TaskResourceSet { - - result := runtimeInterfaces.TaskResourceSet{} - for _, entry := range resourceEntries { - switch entry.Name { - case core.Resources_CPU: - result.CPU = parseQuantityNoError(ctx, identifier.String(), fmt.Sprintf("%v.cpu", resourceName), entry.Value) - case core.Resources_MEMORY: - result.Memory = parseQuantityNoError(ctx, identifier.String(), fmt.Sprintf("%v.memory", resourceName), entry.Value) - case core.Resources_EPHEMERAL_STORAGE: - result.EphemeralStorage = parseQuantityNoError(ctx, identifier.String(), - fmt.Sprintf("%v.ephemeral storage", resourceName), entry.Value) - case core.Resources_GPU: - result.GPU = parseQuantityNoError(ctx, identifier.String(), "gpu", entry.Value) - } - } - - return result -} - -func getCompleteTaskResourceRequirements(ctx context.Context, identifier *core.Identifier, task *core.CompiledTask) completeTaskResources { - return completeTaskResources{ - Defaults: getTaskResourcesAsSet(ctx, identifier, task.GetTemplate().GetContainer().Resources.Requests, "requests"), - Limits: getTaskResourcesAsSet(ctx, identifier, task.GetTemplate().GetContainer().Resources.Limits, "limits"), - } -} - // TODO: Delete this code usage after the flyte v0.17.0 release // Assumes input contains a compiled task with a valid container resource execConfig. // @@ -254,7 +219,7 @@ func (m *ExecutionManager) setCompiledTaskDefaults(ctx context.Context, task *co // The IDL representation for container-type tasks represents resources as a list with string quantities. // In order to easily reason about them we convert them to a set where we can O(1) fetch specific resources (e.g. CPU) // and represent them as comparable quantities rather than strings. 
- taskResourceRequirements := getCompleteTaskResourceRequirements(ctx, task.Template.Id, task) + taskResourceRequirements := util.GetCompleteTaskResourceRequirements(ctx, task.Template.Id, task) cpu := flytek8s.AdjustOrDefaultResource(taskResourceRequirements.Defaults.CPU, taskResourceRequirements.Limits.CPU, platformTaskResources.Defaults.CPU, platformTaskResources.Limits.CPU) @@ -334,68 +299,6 @@ func (m *ExecutionManager) setCompiledTaskDefaults(ctx context.Context, task *co } } -func parseQuantityNoError(ctx context.Context, ownerID, name, value string) resource.Quantity { - q, err := resource.ParseQuantity(value) - if err != nil { - logger.Infof(ctx, "Failed to parse owner's [%s] resource [%s]'s value [%s] with err: %v", ownerID, name, value, err) - } - - return q -} - -func fromAdminProtoTaskResourceSpec(ctx context.Context, spec *admin.TaskResourceSpec) runtimeInterfaces.TaskResourceSet { - result := runtimeInterfaces.TaskResourceSet{} - if len(spec.Cpu) > 0 { - result.CPU = parseQuantityNoError(ctx, "project", "cpu", spec.Cpu) - } - - if len(spec.Memory) > 0 { - result.Memory = parseQuantityNoError(ctx, "project", "memory", spec.Memory) - } - - if len(spec.Storage) > 0 { - result.Storage = parseQuantityNoError(ctx, "project", "storage", spec.Storage) - } - - if len(spec.EphemeralStorage) > 0 { - result.EphemeralStorage = parseQuantityNoError(ctx, "project", "ephemeral storage", spec.EphemeralStorage) - } - - if len(spec.Gpu) > 0 { - result.GPU = parseQuantityNoError(ctx, "project", "gpu", spec.Gpu) - } - - return result -} - -func (m *ExecutionManager) getTaskResources(ctx context.Context, workflow *core.Identifier) workflowengineInterfaces.TaskResources { - resource, err := m.resourceManager.GetResource(ctx, interfaces.ResourceRequest{ - Project: workflow.Project, - Domain: workflow.Domain, - Workflow: workflow.Name, - ResourceType: admin.MatchableResource_TASK_RESOURCE, - }) - - if err != nil { - logger.Warningf(ctx, "Failed to fetch override values when assigning task resource default values for [%+v]: %v", - workflow, err) - } - - logger.Debugf(ctx, "Assigning task requested resources for [%+v]", workflow) - var taskResourceAttributes = workflowengineInterfaces.TaskResources{} - if resource != nil && resource.Attributes != nil && resource.Attributes.GetTaskResourceAttributes() != nil { - taskResourceAttributes.Defaults = fromAdminProtoTaskResourceSpec(ctx, resource.Attributes.GetTaskResourceAttributes().Defaults) - taskResourceAttributes.Limits = fromAdminProtoTaskResourceSpec(ctx, resource.Attributes.GetTaskResourceAttributes().Limits) - } else { - taskResourceAttributes = workflowengineInterfaces.TaskResources{ - Defaults: m.config.TaskResourceConfiguration().GetDefaults(), - Limits: m.config.TaskResourceConfiguration().GetLimits(), - } - } - - return taskResourceAttributes -} - // Fetches inherited execution metadata including the parent node execution db model id and the source execution model id // as well as sets request spec metadata with the inherited principal and adjusted nesting data. 
func (m *ExecutionManager) getInheritedExecMetadata(ctx context.Context, requestSpec *admin.ExecutionSpec, @@ -420,7 +323,7 @@ func (m *ExecutionManager) getInheritedExecMetadata(ctx context.Context, request } sourceExecutionID = sourceExecutionModel.ID requestSpec.Metadata.Principal = sourceExecutionModel.User - sourceExecution, err := transformers.FromExecutionModel(*sourceExecutionModel) + sourceExecution, err := transformers.FromExecutionModel(*sourceExecutionModel, transformers.DefaultExecutionTransformerOptions) if err != nil { logger.Errorf(ctx, "Failed transform parent execution model for child execution [%+v] with err: %v", workflowExecutionID, err) return parentNodeExecutionID, sourceExecutionID, err @@ -490,11 +393,8 @@ func (m *ExecutionManager) getExecutionConfig(ctx context.Context, request *admi // K8sServiceAccount and IamRole is empty then get the values from the deprecated fields. resolvedAuthRole := resolveAuthRole(request, launchPlan) resolvedSecurityCtx := resolveSecurityCtx(ctx, workflowExecConfig.GetSecurityContext(), resolvedAuthRole) - if workflowExecConfig.GetSecurityContext() == nil && - (len(resolvedSecurityCtx.GetRunAs().GetK8SServiceAccount()) > 0 || - len(resolvedSecurityCtx.GetRunAs().GetIamRole()) > 0) { - workflowExecConfig.SecurityContext = resolvedSecurityCtx - } + workflowExecConfig.SecurityContext = resolvedSecurityCtx + // Merge the application config into workflowExecConfig. If even the deprecated fields are not set workflowExecConfig = util.MergeIntoExecConfig(workflowExecConfig, m.config.ApplicationConfiguration().GetTopLevelConfig()) // Explicitly set the security context if its nil since downstream we expect this settings to be available @@ -575,6 +475,18 @@ func (m *ExecutionManager) launchSingleTaskExecution( return nil, nil, err } + executionInputs, err := validation.CheckAndFetchInputsForExecution( + request.Inputs, + launchPlan.Spec.FixedInputs, + launchPlan.Closure.ExpectedInputs, + ) + if err != nil { + logger.Debugf(ctx, "Failed to CheckAndFetchInputsForExecution with request.Inputs: %+v"+ + "fixed inputs: %+v and expected inputs: %+v with err %v", + request.Inputs, launchPlan.Spec.FixedInputs, launchPlan.Closure.ExpectedInputs, err) + return nil, nil, err + } + name := util.GetExecutionName(request) workflowExecutionID := core.WorkflowExecutionIdentifier{ Project: request.Project, @@ -600,7 +512,7 @@ func (m *ExecutionManager) launchSingleTaskExecution( } // Dynamically assign task resource defaults. 
- platformTaskResources := m.getTaskResources(ctx, workflow.Id) + platformTaskResources := util.GetTaskResources(ctx, workflow.Id, m.resourceManager, m.config.TaskResourceConfiguration()) for _, t := range workflow.Closure.CompiledWorkflow.Tasks { m.setCompiledTaskDefaults(ctx, t, platformTaskResources) } @@ -647,7 +559,7 @@ func (m *ExecutionManager) launchSingleTaskExecution( } executionParameters := workflowengineInterfaces.ExecutionParameters{ - Inputs: request.Inputs, + Inputs: executionInputs, AcceptedAt: requestedAt, Labels: labels, Annotations: annotations, @@ -721,6 +633,7 @@ func (m *ExecutionManager) launchSingleTaskExecution( InputsURI: inputsURI, UserInputsURI: userInputsURI, SecurityContext: executionConfig.SecurityContext, + LaunchEntity: taskIdentifier.ResourceType, }) if err != nil { logger.Infof(ctx, "Failed to create execution model in transformer for id: [%+v] with err: %v", @@ -850,8 +763,8 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( return nil, nil, err } - platformTaskResources := m.getTaskResources(ctx, workflow.Id) // Dynamically assign task resource defaults. + platformTaskResources := util.GetTaskResources(ctx, workflow.Id, m.resourceManager, m.config.TaskResourceConfiguration()) for _, task := range workflow.Closure.CompiledWorkflow.Tasks { m.setCompiledTaskDefaults(ctx, task, platformTaskResources) } @@ -974,6 +887,7 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( InputsURI: inputsURI, UserInputsURI: userInputsURI, SecurityContext: executionConfig.SecurityContext, + LaunchEntity: launchPlan.Id.ResourceType, }) if err != nil { logger.Infof(ctx, "Failed to create execution model in transformer for id: [%+v] with err: %v", @@ -1034,7 +948,7 @@ func (m *ExecutionManager) RelaunchExecution( logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err %v", request, err) return nil, err } - existingExecution, err := transformers.FromExecutionModel(*existingExecutionModel) + existingExecution, err := transformers.FromExecutionModel(*existingExecutionModel, transformers.DefaultExecutionTransformerOptions) if err != nil { return nil, err } @@ -1060,6 +974,7 @@ func (m *ExecutionManager) RelaunchExecution( } executionSpec.Metadata.Mode = admin.ExecutionMetadata_RELAUNCH executionSpec.Metadata.ReferenceExecution = existingExecution.Id + executionSpec.OverwriteCache = request.GetOverwriteCache() var executionModel *models.Execution ctx, executionModel, err = m.launchExecutionAndPrepareModel(ctx, admin.ExecutionCreateRequest{ Project: request.Id.Project, @@ -1090,7 +1005,7 @@ func (m *ExecutionManager) RecoverExecution( logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err %v", request, err) return nil, err } - existingExecution, err := transformers.FromExecutionModel(*existingExecutionModel) + existingExecution, err := transformers.FromExecutionModel(*existingExecutionModel, transformers.DefaultExecutionTransformerOptions) if err != nil { return nil, err } @@ -1141,7 +1056,7 @@ func (m *ExecutionManager) emitScheduledWorkflowMetrics( return } // Find the reference launch plan to get the kickoff time argument - execution, err := transformers.FromExecutionModel(*executionModel) + execution, err := transformers.FromExecutionModel(*executionModel, transformers.DefaultExecutionTransformerOptions) if err != nil { logger.Warningf(context.Background(), "failed to transform execution model when emitting scheduled workflow execution stats with for "+ @@ -1384,7 +1299,7 @@ func (m *ExecutionManager) GetExecution( 
logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err: %v", request, err) return nil, err } - execution, transformerErr := transformers.FromExecutionModel(*executionModel) + execution, transformerErr := transformers.FromExecutionModel(*executionModel, transformers.DefaultExecutionTransformerOptions) if transformerErr != nil { logger.Debugf(ctx, "Failed to transform execution model [%+v] to proto object with err: %v", request.Id, transformerErr) @@ -1427,7 +1342,7 @@ func (m *ExecutionManager) GetExecutionData( logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err: %v", request, err) return nil, err } - execution, err := transformers.FromExecutionModel(*executionModel) + execution, err := transformers.FromExecutionModel(*executionModel, transformers.DefaultExecutionTransformerOptions) if err != nil { logger.Debugf(ctx, "Failed to transform execution model [%+v] to proto object with err: %v", request.Id, err) return nil, err @@ -1527,7 +1442,7 @@ func (m *ExecutionManager) ListExecutions( logger.Debugf(ctx, "Failed to list executions using input [%+v] with err %v", listExecutionsInput, err) return nil, err } - executionList, err := transformers.FromExecutionModels(output.Executions) + executionList, err := transformers.FromExecutionModels(output.Executions, transformers.ListExecutionTransformerOptions) if err != nil { logger.Errorf(ctx, "Failed to transform execution models [%+v] with err: %v", output.Executions, err) @@ -1557,7 +1472,7 @@ func (m *ExecutionManager) ListExecutions( func (m *ExecutionManager) publishNotifications(ctx context.Context, request admin.WorkflowExecutionEventRequest, execution models.Execution) error { // Notifications are stored in the Spec object of an admin.Execution object. - adminExecution, err := transformers.FromExecutionModel(execution) + adminExecution, err := transformers.FromExecutionModel(execution, transformers.DefaultExecutionTransformerOptions) if err != nil { // This shouldn't happen because execution manager marshaled the data into models.Execution. 
m.systemMetrics.TransformerError.Inc() diff --git a/pkg/manager/impl/execution_manager_test.go b/pkg/manager/impl/execution_manager_test.go index 309f3cde0..e8d16d348 100644 --- a/pkg/manager/impl/execution_manager_test.go +++ b/pkg/manager/impl/execution_manager_test.go @@ -286,6 +286,7 @@ func TestCreateExecution(t *testing.T) { assert.Equal(t, principal, spec.Metadata.Principal) assert.Equal(t, rawOutput, spec.RawOutputDataConfig.OutputLocationPrefix) assert.True(t, proto.Equal(spec.ClusterAssignment, &clusterAssignment)) + assert.Equal(t, "launch_plan", input.LaunchEntity) return nil }) setDefaultLpCallbackForExecTest(repository) @@ -357,7 +358,8 @@ func TestCreateExecution(t *testing.T) { request.Spec.RawOutputDataConfig = &admin.RawOutputDataConfig{OutputLocationPrefix: rawOutput} request.Spec.ClusterAssignment = &clusterAssignment - identity := auth.NewIdentityContext("", principal, "", time.Now(), sets.NewString(), nil, nil) + identity, err := auth.NewIdentityContext("", principal, "", time.Now(), sets.NewString(), nil, nil) + assert.NoError(t, err) ctx := identity.WithContext(context.Background()) response, err := execManager.CreateExecution(ctx, request, requestedAt) assert.Nil(t, err) @@ -1001,6 +1003,86 @@ func TestCreateExecutionInterruptible(t *testing.T) { } } +func TestCreateExecutionOverwriteCache(t *testing.T) { + tests := []struct { + name string + task bool + overwriteCache bool + want bool + }{ + { + name: "LaunchPlanDefault", + task: false, + overwriteCache: false, + want: false, + }, + { + name: "LaunchPlanEnable", + task: false, + overwriteCache: true, + want: true, + }, + { + name: "TaskDefault", + task: false, + overwriteCache: false, + want: false, + }, + { + name: "TaskEnable", + task: true, + overwriteCache: true, + want: true, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + request := testutils.GetExecutionRequest() + if tt.task { + request.Spec.LaunchPlan.ResourceType = core.ResourceType_TASK + } + request.Spec.OverwriteCache = tt.overwriteCache + + repository := getMockRepositoryForExecTest() + setDefaultLpCallbackForExecTest(repository) + setDefaultTaskCallbackForExecTest(repository) + + exCreateFunc := func(ctx context.Context, input models.Execution) error { + var spec admin.ExecutionSpec + err := proto.Unmarshal(input.Spec, &spec) + assert.Nil(t, err) + + if tt.task { + assert.Equal(t, uint(0), input.LaunchPlanID) + assert.NotEqual(t, uint(0), input.TaskID) + } else { + assert.NotEqual(t, uint(0), input.LaunchPlanID) + assert.Equal(t, uint(0), input.TaskID) + } + + assert.Equal(t, tt.overwriteCache, spec.GetOverwriteCache()) + + return nil + } + + repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) + mockExecutor := workflowengineMocks.WorkflowExecutor{} + mockExecutor.OnExecuteMatch(mock.Anything, mock.Anything, mock.Anything).Return(workflowengineInterfaces.ExecutionResponse{}, nil) + mockExecutor.OnID().Return("testMockExecutor") + r := plugins.NewRegistry() + r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &mockExecutor) + execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) + + _, err := execManager.CreateExecution(context.Background(), request, requestedAt) + assert.Nil(t, err) + }) + } +} + func 
makeExecutionGetFunc( t *testing.T, closureBytes []byte, startTime *time.Time) repositoryMocks.GetExecutionFunc { return func(ctx context.Context, input interfaces.Identifier) (models.Execution, error) { @@ -1090,6 +1172,39 @@ func makeExecutionInterruptibleGetFunc( } } +func makeExecutionOverwriteCacheGetFunc( + t *testing.T, closureBytes []byte, startTime *time.Time, overwriteCache bool) repositoryMocks.GetExecutionFunc { + return func(ctx context.Context, input interfaces.Identifier) (models.Execution, error) { + assert.Equal(t, "project", input.Project) + assert.Equal(t, "domain", input.Domain) + assert.Equal(t, "name", input.Name) + + request := testutils.GetExecutionRequest() + request.Spec.OverwriteCache = overwriteCache + + specBytes, err := proto.Marshal(request.Spec) + assert.Nil(t, err) + + return models.Execution{ + ExecutionKey: models.ExecutionKey{ + Project: "project", + Domain: "domain", + Name: "name", + }, + BaseModel: models.BaseModel{ + ID: uint(8), + }, + Spec: specBytes, + Phase: core.WorkflowExecution_QUEUED.String(), + Closure: closureBytes, + LaunchPlanID: uint(1), + WorkflowID: uint(2), + StartedAt: startTime, + Cluster: testCluster, + }, nil + } +} + func TestRelaunchExecution(t *testing.T) { // Set up mocks. repository := getMockRepositoryForExecTest() @@ -1280,6 +1395,129 @@ func TestRelaunchExecutionInterruptibleOverride(t *testing.T) { assert.True(t, createCalled) } +func TestRelaunchExecutionOverwriteCacheOverride(t *testing.T) { + // Set up mocks. + repository := getMockRepositoryForExecTest() + setDefaultLpCallbackForExecTest(repository) + mockExecutor := workflowengineMocks.WorkflowExecutor{} + mockExecutor.OnExecuteMatch(mock.Anything, mock.Anything, mock.Anything).Return(workflowengineInterfaces.ExecutionResponse{}, nil) + mockExecutor.OnID().Return("testMockExecutor") + r := plugins.NewRegistry() + r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &mockExecutor) + execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) + startTime := time.Now() + startTimeProto, _ := ptypes.TimestampProto(startTime) + existingClosure := admin.ExecutionClosure{ + Phase: core.WorkflowExecution_RUNNING, + StartedAt: startTimeProto, + } + existingClosureBytes, _ := proto.Marshal(&existingClosure) + + t.Run("override enable", func(t *testing.T) { + executionGetFunc := makeExecutionOverwriteCacheGetFunc(t, existingClosureBytes, &startTime, false) + repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetGetCallback(executionGetFunc) + + var createCalled bool + exCreateFunc := func(ctx context.Context, input models.Execution) error { + createCalled = true + assert.Equal(t, "relaunchy", input.Name) + assert.Equal(t, "domain", input.Domain) + assert.Equal(t, "project", input.Project) + assert.Equal(t, uint(8), input.SourceExecutionID) + var spec admin.ExecutionSpec + err := proto.Unmarshal(input.Spec, &spec) + assert.Nil(t, err) + assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode) + assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode) + assert.True(t, spec.GetOverwriteCache()) + return nil + } + repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) + + asd, err := execManager.RelaunchExecution(context.Background(), admin.ExecutionRelaunchRequest{ + Id: 
&core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + Name: "relaunchy", + OverwriteCache: true, + }, requestedAt) + assert.Nil(t, err) + assert.NotNil(t, asd) + assert.True(t, createCalled) + }) + + t.Run("override disable", func(t *testing.T) { + executionGetFunc := makeExecutionOverwriteCacheGetFunc(t, existingClosureBytes, &startTime, true) + repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetGetCallback(executionGetFunc) + + var createCalled bool + exCreateFunc := func(ctx context.Context, input models.Execution) error { + createCalled = true + assert.Equal(t, "relaunchy", input.Name) + assert.Equal(t, "domain", input.Domain) + assert.Equal(t, "project", input.Project) + assert.Equal(t, uint(8), input.SourceExecutionID) + var spec admin.ExecutionSpec + err := proto.Unmarshal(input.Spec, &spec) + assert.Nil(t, err) + assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode) + assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode) + assert.False(t, spec.GetOverwriteCache()) + return nil + } + repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) + + asd, err := execManager.RelaunchExecution(context.Background(), admin.ExecutionRelaunchRequest{ + Id: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + Name: "relaunchy", + OverwriteCache: false, + }, requestedAt) + assert.Nil(t, err) + assert.NotNil(t, asd) + assert.True(t, createCalled) + }) + + t.Run("override omitted", func(t *testing.T) { + executionGetFunc := makeExecutionOverwriteCacheGetFunc(t, existingClosureBytes, &startTime, true) + repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetGetCallback(executionGetFunc) + + var createCalled bool + exCreateFunc := func(ctx context.Context, input models.Execution) error { + createCalled = true + assert.Equal(t, "relaunchy", input.Name) + assert.Equal(t, "domain", input.Domain) + assert.Equal(t, "project", input.Project) + assert.Equal(t, uint(8), input.SourceExecutionID) + var spec admin.ExecutionSpec + err := proto.Unmarshal(input.Spec, &spec) + assert.Nil(t, err) + assert.Equal(t, admin.ExecutionMetadata_RELAUNCH, spec.Metadata.Mode) + assert.Equal(t, int32(admin.ExecutionMetadata_RELAUNCH), input.Mode) + assert.False(t, spec.GetOverwriteCache()) + return nil + } + repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) + + asd, err := execManager.RelaunchExecution(context.Background(), admin.ExecutionRelaunchRequest{ + Id: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + Name: "relaunchy", + }, requestedAt) + assert.Nil(t, err) + assert.NotNil(t, asd) + assert.True(t, createCalled) + }) +} + func TestRecoverExecution(t *testing.T) { // Set up mocks. repository := getMockRepositoryForExecTest() @@ -1580,6 +1818,67 @@ func TestRecoverExecutionInterruptibleOverride(t *testing.T) { assert.True(t, proto.Equal(expectedResponse, response)) } +func TestRecoverExecutionOverwriteCacheOverride(t *testing.T) { + // Set up mocks. 
+ repository := getMockRepositoryForExecTest() + setDefaultLpCallbackForExecTest(repository) + mockExecutor := workflowengineMocks.WorkflowExecutor{} + mockExecutor.OnExecuteMatch(mock.Anything, mock.Anything, mock.Anything).Return(workflowengineInterfaces.ExecutionResponse{}, nil) + mockExecutor.OnID().Return("testMockExecutor") + r := plugins.NewRegistry() + r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &mockExecutor) + execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) + startTime := time.Now() + startTimeProto, _ := ptypes.TimestampProto(startTime) + existingClosure := admin.ExecutionClosure{ + Phase: core.WorkflowExecution_SUCCEEDED, + StartedAt: startTimeProto, + } + existingClosureBytes, _ := proto.Marshal(&existingClosure) + executionGetFunc := makeExecutionOverwriteCacheGetFunc(t, existingClosureBytes, &startTime, true) + repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetGetCallback(executionGetFunc) + + var createCalled bool + exCreateFunc := func(ctx context.Context, input models.Execution) error { + createCalled = true + assert.Equal(t, "recovered", input.Name) + assert.Equal(t, "domain", input.Domain) + assert.Equal(t, "project", input.Project) + assert.Equal(t, uint(8), input.SourceExecutionID) + var spec admin.ExecutionSpec + err := proto.Unmarshal(input.Spec, &spec) + assert.Nil(t, err) + assert.Equal(t, admin.ExecutionMetadata_RECOVERED, spec.Metadata.Mode) + assert.Equal(t, int32(admin.ExecutionMetadata_RECOVERED), input.Mode) + assert.True(t, spec.GetOverwriteCache()) + return nil + } + repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) + + // Issue request. + response, err := execManager.RecoverExecution(context.Background(), admin.ExecutionRecoverRequest{ + Id: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + Name: "recovered", + }, requestedAt) + + // And verify response. 
+ assert.Nil(t, err) + + expectedResponse := &admin.ExecutionCreateResponse{ + Id: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "recovered", + }, + } + assert.True(t, createCalled) + assert.True(t, proto.Equal(expectedResponse, response)) +} + func TestCreateWorkflowEvent(t *testing.T) { repository := repositoryMocks.NewMockRepository() startTime := time.Now() @@ -2824,7 +3123,8 @@ func TestTerminateExecution(t *testing.T) { r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &mockExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - identity := auth.NewIdentityContext("", principal, "", time.Now(), sets.NewString(), nil, nil) + identity, err := auth.NewIdentityContext("", principal, "", time.Now(), sets.NewString(), nil, nil) + assert.NoError(t, err) ctx := identity.WithContext(context.Background()) resp, err := execManager.TerminateExecution(ctx, admin.ExecutionTerminateRequest{ Id: &core.WorkflowExecutionIdentifier{ @@ -3425,90 +3725,6 @@ func TestListExecutions_LegacyModel(t *testing.T) { assert.Empty(t, executionList.Token) } -func TestGetTaskResourcesAsSet(t *testing.T) { - taskResources := getTaskResourcesAsSet(context.TODO(), &core.Identifier{}, []*core.Resources_ResourceEntry{ - { - Name: core.Resources_CPU, - Value: "100", - }, - { - Name: core.Resources_MEMORY, - Value: "200", - }, - { - Name: core.Resources_EPHEMERAL_STORAGE, - Value: "300", - }, - { - Name: core.Resources_GPU, - Value: "400", - }, - }, "request") - assert.True(t, taskResources.CPU.Equal(resource.MustParse("100"))) - assert.True(t, taskResources.Memory.Equal(resource.MustParse("200"))) - assert.True(t, taskResources.EphemeralStorage.Equal(resource.MustParse("300"))) - assert.True(t, taskResources.GPU.Equal(resource.MustParse("400"))) -} - -func TestGetCompleteTaskResourceRequirements(t *testing.T) { - taskResources := getCompleteTaskResourceRequirements(context.TODO(), &core.Identifier{}, &core.CompiledTask{ - Template: &core.TaskTemplate{ - Target: &core.TaskTemplate_Container{ - Container: &core.Container{ - Resources: &core.Resources{ - Requests: []*core.Resources_ResourceEntry{ - { - Name: core.Resources_CPU, - Value: "100", - }, - { - Name: core.Resources_MEMORY, - Value: "200", - }, - { - Name: core.Resources_EPHEMERAL_STORAGE, - Value: "300", - }, - { - Name: core.Resources_GPU, - Value: "400", - }, - }, - Limits: []*core.Resources_ResourceEntry{ - { - Name: core.Resources_CPU, - Value: "200", - }, - { - Name: core.Resources_MEMORY, - Value: "400", - }, - { - Name: core.Resources_EPHEMERAL_STORAGE, - Value: "600", - }, - { - Name: core.Resources_GPU, - Value: "800", - }, - }, - }, - }, - }, - }, - }) - - assert.True(t, taskResources.Defaults.CPU.Equal(resource.MustParse("100"))) - assert.True(t, taskResources.Defaults.Memory.Equal(resource.MustParse("200"))) - assert.True(t, taskResources.Defaults.EphemeralStorage.Equal(resource.MustParse("300"))) - assert.True(t, taskResources.Defaults.GPU.Equal(resource.MustParse("400"))) - - assert.True(t, taskResources.Limits.CPU.Equal(resource.MustParse("200"))) - assert.True(t, taskResources.Limits.Memory.Equal(resource.MustParse("400"))) - assert.True(t, taskResources.Limits.EphemeralStorage.Equal(resource.MustParse("600"))) - assert.True(t, 
taskResources.Limits.GPU.Equal(resource.MustParse("800"))) -} - func TestSetDefaults(t *testing.T) { task := &core.CompiledTask{ Template: &core.TaskTemplate{ @@ -3787,11 +4003,11 @@ func TestCreateSingleTaskExecution(t *testing.T) { repository := getMockRepositoryForExecTest() var getCalledCount = 0 var newlyCreatedWorkflow models.Workflow - workflowcreateFunc := func(input models.Workflow) error { + workflowCreateFunc := func(input models.Workflow, descriptionEntity *models.DescriptionEntity) error { newlyCreatedWorkflow = input return nil } - repository.WorkflowRepo().(*repositoryMocks.MockWorkflowRepo).SetCreateCallback(workflowcreateFunc) + repository.WorkflowRepo().(*repositoryMocks.MockWorkflowRepo).SetCreateCallback(workflowCreateFunc) workflowGetFunc := func(input interfaces.Identifier) (models.Workflow, error) { if getCalledCount <= 1 { @@ -3802,6 +4018,13 @@ func TestCreateSingleTaskExecution(t *testing.T) { return newlyCreatedWorkflow, nil } repository.WorkflowRepo().(*repositoryMocks.MockWorkflowRepo).SetGetCallback(workflowGetFunc) + taskIdentifier := &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: "flytekit", + Domain: "production", + Name: "simple_task", + Version: "12345", + } repository.TaskRepo().(*repositoryMocks.MockTaskRepo).SetGetCallback( func(input interfaces.Identifier) (models.Task, error) { createdAt := time.Now() @@ -3809,13 +4032,7 @@ func TestCreateSingleTaskExecution(t *testing.T) { taskClosure := &admin.TaskClosure{ CompiledTask: &core.CompiledTask{ Template: &core.TaskTemplate{ - Id: &core.Identifier{ - ResourceType: core.ResourceType_TASK, - Project: "flytekit", - Domain: "production", - Name: "simple_task", - Version: "12345", - }, + Id: taskIdentifier, Type: "python-task", Metadata: &core.TaskMetadata{ Runtime: &core.RuntimeMetadata{ @@ -3902,6 +4119,33 @@ func TestCreateSingleTaskExecution(t *testing.T) { Type: "python", }, nil }) + repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback( + func(ctx context.Context, input models.Execution) error { + var spec admin.ExecutionSpec + err := proto.Unmarshal(input.Spec, &spec) + assert.NoError(t, err) + assert.Equal(t, models.ExecutionKey{ + Project: "flytekit", + Domain: "production", + Name: "singletaskexec", + }, input.ExecutionKey) + assert.Equal(t, "task", input.LaunchEntity) + assert.Equal(t, "UNDEFINED", input.Phase) + assert.True(t, proto.Equal(taskIdentifier, spec.LaunchPlan)) + return nil + }) + + var launchplan *models.LaunchPlan + repository.LaunchPlanRepo().(*repositoryMocks.MockLaunchPlanRepo).SetCreateCallback(func(input models.LaunchPlan) error { + launchplan = &input + return nil + }) + repository.LaunchPlanRepo().(*repositoryMocks.MockLaunchPlanRepo).SetGetCallback(func(input interfaces.Identifier) (models.LaunchPlan, error) { + if launchplan == nil { + return models.LaunchPlan{}, flyteAdminErrors.NewFlyteAdminError(codes.NotFound, "launchplan not found") + } + return *launchplan, nil + }) mockStorage := getMockStorageForExecTest(context.Background()) workflowManager := NewWorkflowManager( @@ -3921,13 +4165,7 @@ func TestCreateSingleTaskExecution(t *testing.T) { Domain: "production", Name: "singletaskexec", Spec: &admin.ExecutionSpec{ - LaunchPlan: &core.Identifier{ - Project: "flytekit", - Domain: "production", - Name: "simple_task", - Version: "12345", - ResourceType: core.ResourceType_TASK, - }, + LaunchPlan: taskIdentifier, }, Inputs: &core.LiteralMap{ Literals: map[string]*core.Literal{ @@ -3947,45 +4185,17 @@ func 
TestCreateSingleTaskExecution(t *testing.T) { }, }, } + marshaller := jsonpb.Marshaler{} - stringReq, ferr := marshaller.MarshalToString(&request) + _, ferr := marshaller.MarshalToString(&request) assert.NoError(t, ferr) - println(fmt.Sprintf("req: %+v", stringReq)) - _, err := execManager.CreateExecution(context.TODO(), admin.ExecutionCreateRequest{ - Project: "flytekit", - Domain: "production", - Name: "singletaskexec", - Spec: &admin.ExecutionSpec{ - LaunchPlan: &core.Identifier{ - Project: "flytekit", - Domain: "production", - Name: "simple_task", - Version: "12345", - ResourceType: core.ResourceType_TASK, - }, - AuthRole: &admin.AuthRole{ - KubernetesServiceAccount: "foo", - }, - }, - Inputs: &core.LiteralMap{ - Literals: map[string]*core.Literal{ - "a": { - Value: &core.Literal_Scalar{ - Scalar: &core.Scalar{ - Value: &core.Scalar_Primitive{ - Primitive: &core.Primitive{ - Value: &core.Primitive_Integer{ - Integer: 999, - }, - }, - }, - }, - }, - }, - }, - }, - }, time.Now()) + // test once to create an initial launchplan + _, err := execManager.CreateExecution(context.TODO(), request, time.Now()) + assert.NoError(t, err) + + // test again to ensure existing launchplan retrieval works + _, err = execManager.CreateExecution(context.TODO(), request, time.Now()) assert.NoError(t, err) } @@ -3997,6 +4207,7 @@ func TestGetExecutionConfigOverrides(t *testing.T) { requestK8sServiceAccount := "requestK8sServiceAccount" requestMaxParallelism := int32(10) requestInterruptible := false + requestOverwriteCache := false launchPlanLabels := map[string]string{"launchPlanLabelKey": "launchPlanLabelValue"} launchPlanAnnotations := map[string]string{"launchPlanAnnotationKey": "launchPlanAnnotationValue"} @@ -4005,6 +4216,7 @@ func TestGetExecutionConfigOverrides(t *testing.T) { launchPlanAssumableIamRole := "launchPlanAssumableIamRole" launchPlanMaxParallelism := int32(50) launchPlanInterruptible := true + launchPlanOverwriteCache := true applicationConfig := runtime.NewConfigurationProvider() @@ -4018,6 +4230,7 @@ func TestGetExecutionConfigOverrides(t *testing.T) { rmK8sServiceAccount := "rmK8sServiceAccount" rmMaxParallelism := int32(80) rmInterruptible := false + rmOverwriteCache := false resourceManager := managerMocks.MockResourceManager{} executionManager := ExecutionManager{ @@ -4041,6 +4254,7 @@ func TestGetExecutionConfigOverrides(t *testing.T) { WorkflowExecutionConfig: &admin.WorkflowExecutionConfig{ MaxParallelism: rmMaxParallelism, Interruptible: &wrappers.BoolValue{Value: rmInterruptible}, + OverwriteCache: rmOverwriteCache, Annotations: &admin.Annotations{Values: rmAnnotations}, RawOutputDataConfig: &admin.RawOutputDataConfig{ OutputLocationPrefix: rmOutputLocationPrefix, @@ -4090,6 +4304,7 @@ func TestGetExecutionConfigOverrides(t *testing.T) { }, MaxParallelism: requestMaxParallelism, Interruptible: &wrappers.BoolValue{Value: requestInterruptible}, + OverwriteCache: requestOverwriteCache, }, } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, nil) @@ -4097,6 +4312,7 @@ func TestGetExecutionConfigOverrides(t *testing.T) { assert.Equal(t, requestMaxParallelism, execConfig.MaxParallelism) assert.Equal(t, requestK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) assert.Equal(t, requestInterruptible, execConfig.Interruptible.Value) + assert.Equal(t, requestOverwriteCache, execConfig.OverwriteCache) assert.Equal(t, requestOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix) assert.Equal(t, requestLabels, 
execConfig.GetLabels().Values) assert.Equal(t, requestAnnotations, execConfig.GetAnnotations().Values) @@ -4126,12 +4342,14 @@ func TestGetExecutionConfigOverrides(t *testing.T) { }, MaxParallelism: launchPlanMaxParallelism, Interruptible: &wrappers.BoolValue{Value: launchPlanInterruptible}, + OverwriteCache: launchPlanOverwriteCache, }, } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) assert.Equal(t, requestMaxParallelism, execConfig.MaxParallelism) assert.Equal(t, launchPlanInterruptible, execConfig.Interruptible.Value) + assert.Equal(t, launchPlanOverwriteCache, execConfig.OverwriteCache) assert.True(t, proto.Equal(launchPlan.Spec.SecurityContext, execConfig.SecurityContext)) assert.True(t, proto.Equal(launchPlan.Spec.Annotations, execConfig.Annotations)) assert.Equal(t, requestOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix) @@ -4162,12 +4380,14 @@ func TestGetExecutionConfigOverrides(t *testing.T) { }, MaxParallelism: launchPlanMaxParallelism, Interruptible: &wrappers.BoolValue{Value: launchPlanInterruptible}, + OverwriteCache: launchPlanOverwriteCache, }, } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) assert.Equal(t, launchPlanMaxParallelism, execConfig.MaxParallelism) assert.Equal(t, launchPlanInterruptible, execConfig.Interruptible.Value) + assert.Equal(t, launchPlanOverwriteCache, execConfig.OverwriteCache) assert.Equal(t, launchPlanK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) assert.Equal(t, launchPlanOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix) assert.Equal(t, launchPlanLabels, execConfig.GetLabels().Values) @@ -4192,12 +4412,14 @@ func TestGetExecutionConfigOverrides(t *testing.T) { }, MaxParallelism: launchPlanMaxParallelism, Interruptible: &wrappers.BoolValue{Value: launchPlanInterruptible}, + OverwriteCache: launchPlanOverwriteCache, }, } execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.NoError(t, err) assert.Equal(t, launchPlanMaxParallelism, execConfig.MaxParallelism) assert.Equal(t, launchPlanInterruptible, execConfig.Interruptible.Value) + assert.Equal(t, launchPlanOverwriteCache, execConfig.OverwriteCache) assert.Equal(t, launchPlanK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) assert.Equal(t, launchPlanOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix) assert.Equal(t, launchPlanLabels, execConfig.GetLabels().Values) @@ -4228,6 +4450,7 @@ func TestGetExecutionConfigOverrides(t *testing.T) { assert.NoError(t, err) assert.Equal(t, launchPlanMaxParallelism, execConfig.MaxParallelism) assert.Equal(t, rmInterruptible, execConfig.Interruptible.Value) + assert.Equal(t, rmOverwriteCache, execConfig.OverwriteCache) assert.Equal(t, launchPlanK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) assert.Equal(t, launchPlanOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix) assert.Equal(t, launchPlanLabels, execConfig.GetLabels().Values) @@ -4246,6 +4469,7 @@ func TestGetExecutionConfigOverrides(t *testing.T) { assert.NoError(t, err) assert.Equal(t, rmMaxParallelism, execConfig.MaxParallelism) assert.Equal(t, rmInterruptible, execConfig.Interruptible.Value) + assert.Equal(t, rmOverwriteCache, execConfig.OverwriteCache) assert.Equal(t, rmK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) assert.Equal(t, 
rmOutputLocationPrefix, execConfig.RawOutputDataConfig.OutputLocationPrefix) assert.Equal(t, rmLabels, execConfig.GetLabels().Values) @@ -4291,6 +4515,7 @@ func TestGetExecutionConfigOverrides(t *testing.T) { assert.NoError(t, err) assert.Equal(t, rmMaxParallelism, execConfig.MaxParallelism) assert.Nil(t, execConfig.GetInterruptible()) + assert.False(t, execConfig.OverwriteCache) assert.Equal(t, rmK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) @@ -4327,6 +4552,7 @@ func TestGetExecutionConfigOverrides(t *testing.T) { assert.NoError(t, err) assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) assert.Nil(t, execConfig.GetInterruptible()) + assert.False(t, execConfig.OverwriteCache) assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) @@ -4368,6 +4594,7 @@ func TestGetExecutionConfigOverrides(t *testing.T) { assert.NoError(t, err) assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) assert.Nil(t, execConfig.GetInterruptible()) + assert.False(t, execConfig.OverwriteCache) assert.Equal(t, deprecatedLaunchPlanK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) @@ -4393,6 +4620,7 @@ func TestGetExecutionConfigOverrides(t *testing.T) { WorkflowExecutionConfig: &admin.WorkflowExecutionConfig{ MaxParallelism: 300, Interruptible: &wrappers.BoolValue{Value: true}, + OverwriteCache: true, SecurityContext: &core.SecurityContext{ RunAs: &core.Identity{ K8SServiceAccount: "workflowDefault", @@ -4419,6 +4647,7 @@ func TestGetExecutionConfigOverrides(t *testing.T) { assert.NoError(t, err) assert.Equal(t, int32(300), execConfig.MaxParallelism) assert.True(t, execConfig.Interruptible.Value) + assert.True(t, execConfig.OverwriteCache) assert.Equal(t, "workflowDefault", execConfig.SecurityContext.RunAs.K8SServiceAccount) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) @@ -4448,6 +4677,7 @@ func TestGetExecutionConfigOverrides(t *testing.T) { execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) assert.Equal(t, fmt.Errorf("failed to fetch the resources"), err) assert.Nil(t, execConfig.GetInterruptible()) + assert.False(t, execConfig.GetOverwriteCache()) assert.Nil(t, execConfig.GetSecurityContext()) assert.Nil(t, execConfig.GetRawOutputDataConfig()) assert.Nil(t, execConfig.GetLabels()) @@ -4474,6 +4704,7 @@ func TestGetExecutionConfigOverrides(t *testing.T) { } executionManager.config.ApplicationConfiguration().GetTopLevelConfig().Interruptible = true + executionManager.config.ApplicationConfiguration().GetTopLevelConfig().OverwriteCache = true t.Run("request with interruptible override disabled", func(t *testing.T) { request := &admin.ExecutionCreateRequest{ @@ -4615,6 +4846,106 @@ func TestGetExecutionConfigOverrides(t *testing.T) { assert.Nil(t, execConfig.GetLabels()) assert.Nil(t, execConfig.GetAnnotations()) }) + t.Run("request with skip cache override enabled", func(t *testing.T) { + request := &admin.ExecutionCreateRequest{ + Project: workflowIdentifier.Project, + Domain: workflowIdentifier.Domain, + Spec: &admin.ExecutionSpec{ + OverwriteCache: true, + }, + } + + execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, nil) + 
assert.NoError(t, err) + assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) + assert.True(t, execConfig.OverwriteCache) + assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Nil(t, execConfig.GetRawOutputDataConfig()) + assert.Nil(t, execConfig.GetLabels()) + assert.Nil(t, execConfig.GetAnnotations()) + }) + t.Run("request with no skip cache override specified", func(t *testing.T) { + request := &admin.ExecutionCreateRequest{ + Project: workflowIdentifier.Project, + Domain: workflowIdentifier.Domain, + Spec: &admin.ExecutionSpec{}, + } + + execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, nil) + assert.NoError(t, err) + assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) + assert.True(t, execConfig.OverwriteCache) + assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Nil(t, execConfig.GetRawOutputDataConfig()) + assert.Nil(t, execConfig.GetLabels()) + assert.Nil(t, execConfig.GetAnnotations()) + }) + t.Run("launch plan with skip cache override enabled", func(t *testing.T) { + request := &admin.ExecutionCreateRequest{ + Project: workflowIdentifier.Project, + Domain: workflowIdentifier.Domain, + Spec: &admin.ExecutionSpec{}, + } + + launchPlan := &admin.LaunchPlan{ + Spec: &admin.LaunchPlanSpec{ + OverwriteCache: true, + }, + } + + execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) + assert.NoError(t, err) + assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) + assert.True(t, execConfig.OverwriteCache) + assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Nil(t, execConfig.GetRawOutputDataConfig()) + assert.Nil(t, execConfig.GetLabels()) + assert.Nil(t, execConfig.GetAnnotations()) + }) + t.Run("launch plan with no skip cache override specified", func(t *testing.T) { + request := &admin.ExecutionCreateRequest{ + Project: workflowIdentifier.Project, + Domain: workflowIdentifier.Domain, + Spec: &admin.ExecutionSpec{}, + } + + launchPlan := &admin.LaunchPlan{ + Spec: &admin.LaunchPlanSpec{}, + } + + execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) + assert.NoError(t, err) + assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) + assert.True(t, execConfig.OverwriteCache) + assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Nil(t, execConfig.GetRawOutputDataConfig()) + assert.Nil(t, execConfig.GetLabels()) + assert.Nil(t, execConfig.GetAnnotations()) + }) + t.Run("request and launch plan with different skip cache overrides", func(t *testing.T) { + request := &admin.ExecutionCreateRequest{ + Project: workflowIdentifier.Project, + Domain: workflowIdentifier.Domain, + Spec: &admin.ExecutionSpec{ + OverwriteCache: true, + }, + } + + launchPlan := &admin.LaunchPlan{ + Spec: &admin.LaunchPlanSpec{ + OverwriteCache: false, + }, + } + + execConfig, err := executionManager.getExecutionConfig(context.TODO(), request, launchPlan) + assert.NoError(t, err) + assert.Equal(t, defaultMaxParallelism, execConfig.MaxParallelism) + assert.True(t, execConfig.OverwriteCache) + assert.Equal(t, defaultK8sServiceAccount, execConfig.SecurityContext.RunAs.K8SServiceAccount) + assert.Nil(t, execConfig.GetRawOutputDataConfig()) + assert.Nil(t, execConfig.GetLabels()) + assert.Nil(t, execConfig.GetAnnotations()) + }) }) } @@ -4635,6 +4966,7 @@ func 
TestGetExecutionConfig(t *testing.T) { Target: &admin.MatchingAttributes_WorkflowExecutionConfig{ WorkflowExecutionConfig: &admin.WorkflowExecutionConfig{ MaxParallelism: 100, + OverwriteCache: true, }, }, }, @@ -4653,6 +4985,7 @@ func TestGetExecutionConfig(t *testing.T) { }, nil) assert.NoError(t, err) assert.Equal(t, execConfig.MaxParallelism, int32(100)) + assert.True(t, execConfig.OverwriteCache) } func TestGetExecutionConfig_Spec(t *testing.T) { @@ -4671,14 +5004,17 @@ func TestGetExecutionConfig_Spec(t *testing.T) { Domain: workflowIdentifier.Domain, Spec: &admin.ExecutionSpec{ MaxParallelism: 100, + OverwriteCache: true, }, }, &admin.LaunchPlan{ Spec: &admin.LaunchPlanSpec{ MaxParallelism: 50, + OverwriteCache: false, // explicitly set to false for clarity }, }) assert.NoError(t, err) assert.Equal(t, int32(100), execConfig.MaxParallelism) + assert.True(t, execConfig.OverwriteCache) execConfig, err = executionManager.getExecutionConfig(context.TODO(), &admin.ExecutionCreateRequest{ Project: workflowIdentifier.Project, @@ -4687,10 +5023,12 @@ func TestGetExecutionConfig_Spec(t *testing.T) { }, &admin.LaunchPlan{ Spec: &admin.LaunchPlanSpec{ MaxParallelism: 50, + OverwriteCache: true, }, }) assert.NoError(t, err) assert.Equal(t, int32(50), execConfig.MaxParallelism) + assert.True(t, execConfig.OverwriteCache) resourceManager = managerMocks.MockResourceManager{} resourceManager.GetResourceFunc = func(ctx context.Context, @@ -4702,6 +5040,8 @@ func TestGetExecutionConfig_Spec(t *testing.T) { config: applicationConfig, } + executionManager.config.ApplicationConfiguration().GetTopLevelConfig().OverwriteCache = true + execConfig, err = executionManager.getExecutionConfig(context.TODO(), &admin.ExecutionCreateRequest{ Project: workflowIdentifier.Project, Domain: workflowIdentifier.Domain, @@ -4711,6 +5051,7 @@ func TestGetExecutionConfig_Spec(t *testing.T) { }) assert.NoError(t, err) assert.Equal(t, execConfig.MaxParallelism, int32(25)) + assert.True(t, execConfig.OverwriteCache) } func TestGetClusterAssignment(t *testing.T) { @@ -4954,122 +5295,6 @@ func TestResolvePermissions(t *testing.T) { }) } -func TestGetTaskResources(t *testing.T) { - taskConfig := runtimeMocks.MockTaskResourceConfiguration{} - taskConfig.Defaults = runtimeInterfaces.TaskResourceSet{ - CPU: resource.MustParse("200m"), - GPU: resource.MustParse("8"), - Memory: resource.MustParse("200Gi"), - EphemeralStorage: resource.MustParse("500Mi"), - Storage: resource.MustParse("400Mi"), - } - taskConfig.Limits = runtimeInterfaces.TaskResourceSet{ - CPU: resource.MustParse("300m"), - GPU: resource.MustParse("8"), - Memory: resource.MustParse("500Gi"), - EphemeralStorage: resource.MustParse("501Mi"), - Storage: resource.MustParse("450Mi"), - } - mockConfig := runtimeMocks.NewMockConfigurationProvider( - testutils.GetApplicationConfigWithDefaultDomains(), nil, nil, &taskConfig, - runtimeMocks.NewMockWhitelistConfiguration(), nil) - - t.Run("use runtime application values", func(t *testing.T) { - r := plugins.NewRegistry() - r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) - execManager := NewExecutionManager(repositoryMocks.NewMockRepository(), r, mockConfig, getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - taskResourceAttrs := execManager.(*ExecutionManager).getTaskResources(context.TODO(), &workflowIdentifier) - assert.EqualValues(t, 
taskResourceAttrs, workflowengineInterfaces.TaskResources{ - Defaults: runtimeInterfaces.TaskResourceSet{ - CPU: resource.MustParse("200m"), - GPU: resource.MustParse("8"), - Memory: resource.MustParse("200Gi"), - EphemeralStorage: resource.MustParse("500Mi"), - Storage: resource.MustParse("400Mi"), - }, - Limits: runtimeInterfaces.TaskResourceSet{ - CPU: resource.MustParse("300m"), - GPU: resource.MustParse("8"), - Memory: resource.MustParse("500Gi"), - EphemeralStorage: resource.MustParse("501Mi"), - Storage: resource.MustParse("450Mi"), - }, - }) - }) - t.Run("use specific overrides", func(t *testing.T) { - resourceManager := managerMocks.MockResourceManager{} - resourceManager.GetResourceFunc = func(ctx context.Context, - request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { - assert.EqualValues(t, request, managerInterfaces.ResourceRequest{ - Project: workflowIdentifier.Project, - Domain: workflowIdentifier.Domain, - Workflow: workflowIdentifier.Name, - ResourceType: admin.MatchableResource_TASK_RESOURCE, - }) - return &managerInterfaces.ResourceResponse{ - Attributes: &admin.MatchingAttributes{ - Target: &admin.MatchingAttributes_TaskResourceAttributes{ - TaskResourceAttributes: &admin.TaskResourceAttributes{ - Defaults: &admin.TaskResourceSpec{ - Cpu: "1200m", - Gpu: "18", - Memory: "1200Gi", - EphemeralStorage: "1500Mi", - Storage: "1400Mi", - }, - Limits: &admin.TaskResourceSpec{ - Cpu: "300m", - Gpu: "8", - Memory: "500Gi", - EphemeralStorage: "501Mi", - Storage: "450Mi", - }, - }, - }, - }, - }, nil - } - executionManager := ExecutionManager{ - resourceManager: &resourceManager, - config: mockConfig, - } - taskResourceAttrs := executionManager.getTaskResources(context.TODO(), &workflowIdentifier) - assert.EqualValues(t, taskResourceAttrs, workflowengineInterfaces.TaskResources{ - Defaults: runtimeInterfaces.TaskResourceSet{ - CPU: resource.MustParse("1200m"), - GPU: resource.MustParse("18"), - Memory: resource.MustParse("1200Gi"), - EphemeralStorage: resource.MustParse("1500Mi"), - Storage: resource.MustParse("1400Mi"), - }, - Limits: runtimeInterfaces.TaskResourceSet{ - CPU: resource.MustParse("300m"), - GPU: resource.MustParse("8"), - Memory: resource.MustParse("500Gi"), - EphemeralStorage: resource.MustParse("501Mi"), - Storage: resource.MustParse("450Mi"), - }, - }) - }) -} - -func TestFromAdminProtoTaskResourceSpec(t *testing.T) { - taskResourceSet := fromAdminProtoTaskResourceSpec(context.TODO(), &admin.TaskResourceSpec{ - Cpu: "1", - Memory: "100", - Storage: "200", - EphemeralStorage: "300", - Gpu: "2", - }) - assert.EqualValues(t, runtimeInterfaces.TaskResourceSet{ - CPU: resource.MustParse("1"), - Memory: resource.MustParse("100"), - Storage: resource.MustParse("200"), - EphemeralStorage: resource.MustParse("300"), - GPU: resource.MustParse("2"), - }, taskResourceSet) -} - func TestAddStateFilter(t *testing.T) { t.Run("empty filters", func(t *testing.T) { var filters []common.InlineFilter diff --git a/pkg/manager/impl/metrics_manager.go b/pkg/manager/impl/metrics_manager.go new file mode 100644 index 000000000..a6d010b1e --- /dev/null +++ b/pkg/manager/impl/metrics_manager.go @@ -0,0 +1,684 @@ +package impl + +import ( + "context" + "fmt" + "reflect" + "sort" + "time" + + "github.com/flyteorg/flyteadmin/pkg/manager/interfaces" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/flyteorg/flytepropeller/pkg/apis/flyteworkflow/v1alpha1" + + 
"github.com/flyteorg/flytestdlib/promutils" + + "github.com/golang/protobuf/ptypes/duration" + "github.com/golang/protobuf/ptypes/timestamp" + + "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + RequestLimit uint32 = 50 + + nodeIdle = "NODE_IDLE" + nodeReset = "NODE_RESET" + nodeSetup = "NODE_SETUP" + nodeTeardown = "NODE_TEARDOWN" + nodeTransition = "NODE_TRANSITION" + taskRuntime = "TASK_RUNTIME" + taskSetup = "TASK_SETUP" + taskTeardown = "TASK_TEARDOWN" + workflowSetup = "WORKFLOW_SETUP" + workflowTeardown = "WORKFLOW_TEARDOWN" +) + +var ( + emptyDuration *duration.Duration = &duration.Duration{ + Seconds: 0, + Nanos: 0, + } + emptyTimestamp *timestamp.Timestamp = ×tamp.Timestamp{ + Seconds: 0, + Nanos: 0, + } +) + +type metrics struct { + Scope promutils.Scope +} + +// MetricsManager handles computation of workflow, node, and task execution metrics. +type MetricsManager struct { + workflowManager interfaces.WorkflowInterface + executionManager interfaces.ExecutionInterface + nodeExecutionManager interfaces.NodeExecutionInterface + taskExecutionManager interfaces.TaskExecutionInterface + metrics metrics +} + +// createOperationSpan returns a Span defined by the provided arguments. +func createOperationSpan(startTime, endTime *timestamp.Timestamp, operation string) *core.Span { + return &core.Span{ + StartTime: startTime, + EndTime: endTime, + Id: &core.Span_OperationId{ + OperationId: operation, + }, + } +} + +// getBranchNode searches the provided BranchNode definition for the Node identified by nodeID. +func getBranchNode(nodeID string, branchNode *core.BranchNode) *core.Node { + if branchNode.IfElse.Case.ThenNode.Id == nodeID { + return branchNode.IfElse.Case.ThenNode + } + + for _, other := range branchNode.IfElse.Other { + if other.ThenNode.Id == nodeID { + return other.ThenNode + } + } + + if elseNode, ok := branchNode.IfElse.Default.(*core.IfElseBlock_ElseNode); ok { + if elseNode.ElseNode.Id == nodeID { + return elseNode.ElseNode + } + } + + return nil +} + +// getLatestUpstreamNodeExecution returns the NodeExecution with the latest UpdatedAt timestamp that is an upstream +// dependency of the provided nodeID. This is useful for computing the duration between when a node is first available +// for scheduling and when it is actually scheduled. +func (m *MetricsManager) getLatestUpstreamNodeExecution(nodeID string, upstreamNodeIds map[string]*core.ConnectionSet_IdList, + nodeExecutions map[string]*admin.NodeExecution) *admin.NodeExecution { + + var nodeExecution *admin.NodeExecution + var latestUpstreamUpdatedAt = time.Unix(0, 0) + if connectionSet, exists := upstreamNodeIds[nodeID]; exists { + for _, upstreamNodeID := range connectionSet.Ids { + upstreamNodeExecution, exists := nodeExecutions[upstreamNodeID] + if !exists { + continue + } + + t := upstreamNodeExecution.Closure.UpdatedAt.AsTime() + if t.After(latestUpstreamUpdatedAt) { + nodeExecution = upstreamNodeExecution + latestUpstreamUpdatedAt = t + } + } + } + + return nodeExecution +} + +// getNodeExecutions queries the nodeExecutionManager for NodeExecutions adhering to the specified request. 
+func (m *MetricsManager) getNodeExecutions(ctx context.Context, request admin.NodeExecutionListRequest) (map[string]*admin.NodeExecution, error) { + nodeExecutions := make(map[string]*admin.NodeExecution) + for { + response, err := m.nodeExecutionManager.ListNodeExecutions(ctx, request) + if err != nil { + return nil, err + } + + for _, nodeExecution := range response.NodeExecutions { + nodeExecutions[nodeExecution.Metadata.SpecNodeId] = nodeExecution + } + + if len(response.NodeExecutions) < int(request.Limit) { + break + } + + request.Token = response.Token + } + + return nodeExecutions, nil +} + +// getTaskExecutions queries the taskExecutionManager for TaskExecutions adhering to the specified request. +func (m *MetricsManager) getTaskExecutions(ctx context.Context, request admin.TaskExecutionListRequest) ([]*admin.TaskExecution, error) { + taskExecutions := make([]*admin.TaskExecution, 0) + for { + response, err := m.taskExecutionManager.ListTaskExecutions(ctx, request) + if err != nil { + return nil, err + } + + taskExecutions = append(taskExecutions, response.TaskExecutions...) + + if len(response.TaskExecutions) < int(request.Limit) { + break + } + + request.Token = response.Token + } + + return taskExecutions, nil +} + +// parseBranchNodeExecution partitions the BranchNode execution into a collection of Categorical and Reference Spans +// which are appended to the provided spans argument. +func (m *MetricsManager) parseBranchNodeExecution(ctx context.Context, + nodeExecution *admin.NodeExecution, branchNode *core.BranchNode, spans *[]*core.Span, depth int) error { + + // retrieve node execution(s) + nodeExecutions, err := m.getNodeExecutions(ctx, admin.NodeExecutionListRequest{ + WorkflowExecutionId: nodeExecution.Id.ExecutionId, + Limit: RequestLimit, + UniqueParentId: nodeExecution.Id.NodeId, + }) + if err != nil { + return err + } + + // check if the node started + if len(nodeExecutions) == 0 { + *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup)) + } else { + // parse branchNode + if len(nodeExecutions) != 1 { + return fmt.Errorf("invalid branch node execution: expected 1 but found %d node execution(s)", len(nodeExecutions)) + } + + var branchNodeExecution *admin.NodeExecution + for _, e := range nodeExecutions { + branchNodeExecution = e + } + + node := getBranchNode(branchNodeExecution.Metadata.SpecNodeId, branchNode) + if node == nil { + return fmt.Errorf("failed to identify branch node final node definition for nodeID '%s' and branchNode '%+v'", + branchNodeExecution.Metadata.SpecNodeId, branchNode) + } + + // frontend overhead + *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, branchNodeExecution.Closure.CreatedAt, nodeSetup)) + + // node execution + nodeExecutionSpan, err := m.parseNodeExecution(ctx, branchNodeExecution, node, depth) + if err != nil { + return err + } + + *spans = append(*spans, nodeExecutionSpan) + + // backend overhead + if !nodeExecution.Closure.UpdatedAt.AsTime().Before(branchNodeExecution.Closure.UpdatedAt.AsTime()) { + *spans = append(*spans, createOperationSpan(branchNodeExecution.Closure.UpdatedAt, + nodeExecution.Closure.UpdatedAt, nodeTeardown)) + } + } + + return nil +} + +// parseDynamicNodeExecution partitions the DynamicNode execution into a collection of Categorical and Reference Spans +// which are appended to the provided spans argument.
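+// Task attempts are parsed first; any child node executions are then parsed against the compiled workflow returned by GetNodeExecutionData.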
+func (m *MetricsManager) parseDynamicNodeExecution(ctx context.Context, nodeExecution *admin.NodeExecution, spans *[]*core.Span, depth int) error { + taskExecutions, err := m.getTaskExecutions(ctx, admin.TaskExecutionListRequest{ + NodeExecutionId: nodeExecution.Id, + Limit: RequestLimit, + }) + if err != nil { + return err + } + + // if no task executions then everything is execution overhead + if len(taskExecutions) == 0 { + *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup)) + } else { + // frontend overhead + *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, taskExecutions[0].Closure.CreatedAt, nodeSetup)) + + // task execution(s) + parseTaskExecutions(taskExecutions, spans, depth) + + nodeExecutions, err := m.getNodeExecutions(ctx, admin.NodeExecutionListRequest{ + WorkflowExecutionId: nodeExecution.Id.ExecutionId, + Limit: RequestLimit, + UniqueParentId: nodeExecution.Id.NodeId, + }) + if err != nil { + return err + } + + lastTask := taskExecutions[len(taskExecutions)-1] + if len(nodeExecutions) == 0 { + if !nodeExecution.Closure.UpdatedAt.AsTime().Before(lastTask.Closure.UpdatedAt.AsTime()) { + *spans = append(*spans, createOperationSpan(lastTask.Closure.UpdatedAt, nodeExecution.Closure.UpdatedAt, nodeReset)) + } + } else { + // between task execution(s) and node execution(s) overhead + startNode := nodeExecutions[v1alpha1.StartNodeID] + *spans = append(*spans, createOperationSpan(taskExecutions[len(taskExecutions)-1].Closure.UpdatedAt, + startNode.Closure.UpdatedAt, nodeReset)) + + // node execution(s) + getDataRequest := admin.NodeExecutionGetDataRequest{Id: nodeExecution.Id} + nodeExecutionData, err := m.nodeExecutionManager.GetNodeExecutionData(ctx, getDataRequest) + if err != nil { + return err + } + + if err := m.parseNodeExecutions(ctx, nodeExecutions, nodeExecutionData.DynamicWorkflow.CompiledWorkflow, spans, depth); err != nil { + return err + } + + // backend overhead + latestUpstreamNode := m.getLatestUpstreamNodeExecution(v1alpha1.EndNodeID, + nodeExecutionData.DynamicWorkflow.CompiledWorkflow.Primary.Connections.Upstream, nodeExecutions) + if latestUpstreamNode != nil && !nodeExecution.Closure.UpdatedAt.AsTime().Before(latestUpstreamNode.Closure.UpdatedAt.AsTime()) { + *spans = append(*spans, createOperationSpan(latestUpstreamNode.Closure.UpdatedAt, nodeExecution.Closure.UpdatedAt, nodeTeardown)) + } + } + } + + return nil +} + +// parseExecution partitions the workflow execution into a collection of Categorical and Reference Spans which are +// returned as a hierarchical breakdown of the workflow execution.
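+// A depth of 0 stops the recursion and returns only the top-level workflow execution Span.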
+func (m *MetricsManager) parseExecution(ctx context.Context, execution *admin.Execution, depth int) (*core.Span, error) { + spans := make([]*core.Span, 0) + if depth != 0 { + // retrieve workflow and node executions + workflowRequest := admin.ObjectGetRequest{Id: execution.Closure.WorkflowId} + workflow, err := m.workflowManager.GetWorkflow(ctx, workflowRequest) + if err != nil { + return nil, err + } + + nodeExecutions, err := m.getNodeExecutions(ctx, admin.NodeExecutionListRequest{ + WorkflowExecutionId: execution.Id, + Limit: RequestLimit, + }) + if err != nil { + return nil, err + } + + // check if workflow has started + startNode := nodeExecutions[v1alpha1.StartNodeID] + if startNode.Closure.UpdatedAt == nil || reflect.DeepEqual(startNode.Closure.UpdatedAt, emptyTimestamp) { + spans = append(spans, createOperationSpan(execution.Closure.CreatedAt, execution.Closure.UpdatedAt, workflowSetup)) + } else { + // compute frontend overhead + spans = append(spans, createOperationSpan(execution.Closure.CreatedAt, startNode.Closure.UpdatedAt, workflowSetup)) + + // iterate over nodes and compute overhead + if err := m.parseNodeExecutions(ctx, nodeExecutions, workflow.Closure.CompiledWorkflow, &spans, depth-1); err != nil { + return nil, err + } + + // compute backend overhead + latestUpstreamNode := m.getLatestUpstreamNodeExecution(v1alpha1.EndNodeID, + workflow.Closure.CompiledWorkflow.Primary.Connections.Upstream, nodeExecutions) + if latestUpstreamNode != nil && !execution.Closure.UpdatedAt.AsTime().Before(latestUpstreamNode.Closure.UpdatedAt.AsTime()) { + spans = append(spans, createOperationSpan(latestUpstreamNode.Closure.UpdatedAt, + execution.Closure.UpdatedAt, workflowTeardown)) + } + } + } + + return &core.Span{ + StartTime: execution.Closure.CreatedAt, + EndTime: execution.Closure.UpdatedAt, + Id: &core.Span_WorkflowId{ + WorkflowId: execution.Id, + }, + Spans: spans, + }, nil +} + +// parseGateNodeExecution partitions the GateNode execution into a collection of Categorical and Reference Spans +// which are appended to the provided spans argument. +func (m *MetricsManager) parseGateNodeExecution(_ context.Context, nodeExecution *admin.NodeExecution, spans *[]*core.Span) { + // check if node has started yet + if nodeExecution.Closure.StartedAt == nil || reflect.DeepEqual(nodeExecution.Closure.StartedAt, emptyTimestamp) { + *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup)) + } else { + // frontend overhead + *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.StartedAt, nodeSetup)) + + // check if plugin has completed yet + if nodeExecution.Closure.Duration == nil || reflect.DeepEqual(nodeExecution.Closure.Duration, emptyDuration) { + *spans = append(*spans, createOperationSpan(nodeExecution.Closure.StartedAt, + nodeExecution.Closure.UpdatedAt, nodeIdle)) + } else { + // idle time + nodeEndTime := timestamppb.New(nodeExecution.Closure.StartedAt.AsTime().Add(nodeExecution.Closure.Duration.AsDuration())) + *spans = append(*spans, createOperationSpan(nodeExecution.Closure.StartedAt, nodeEndTime, nodeIdle)) + + // backend overhead + *spans = append(*spans, createOperationSpan(nodeEndTime, nodeExecution.Closure.UpdatedAt, nodeTeardown)) + } + } +} + +// parseLaunchPlanNodeExecution partitions the LaunchPlanNode execution into a collection of Categorical and Reference +// Spans which are appended to the provided spans argument. 
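+// The child workflow execution is resolved from the node's WorkflowNodeMetadata and parsed recursively.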
+func (m *MetricsManager) parseLaunchPlanNodeExecution(ctx context.Context, nodeExecution *admin.NodeExecution, spans *[]*core.Span, depth int) error { + // check if workflow started yet + workflowNode := nodeExecution.Closure.GetWorkflowNodeMetadata() + if workflowNode == nil { + *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup)) + } else { + // retrieve execution + executionRequest := admin.WorkflowExecutionGetRequest{Id: workflowNode.ExecutionId} + execution, err := m.executionManager.GetExecution(ctx, executionRequest) + if err != nil { + return err + } + + // frontend overhead + *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, execution.Closure.CreatedAt, nodeSetup)) + + // execution + span, err := m.parseExecution(ctx, execution, depth) + if err != nil { + return err + } + + *spans = append(*spans, span) + + // backend overhead + if !nodeExecution.Closure.UpdatedAt.AsTime().Before(execution.Closure.UpdatedAt.AsTime()) { + *spans = append(*spans, createOperationSpan(execution.Closure.UpdatedAt, nodeExecution.Closure.UpdatedAt, nodeTeardown)) + } + } + + return nil +} + +// parseNodeExecution partitions the node execution into a collection of Categorical and Reference Spans which are +// returned as a hierarchical breakdown of the node execution. +func (m *MetricsManager) parseNodeExecution(ctx context.Context, nodeExecution *admin.NodeExecution, node *core.Node, depth int) (*core.Span, error) { + spans := make([]*core.Span, 0) + if depth != 0 { + + // parse node + var err error + switch target := node.Target.(type) { + case *core.Node_BranchNode: + // handle branch node + err = m.parseBranchNodeExecution(ctx, nodeExecution, target.BranchNode, &spans, depth-1) + case *core.Node_GateNode: + // handle gate node + m.parseGateNodeExecution(ctx, nodeExecution, &spans) + case *core.Node_TaskNode: + if nodeExecution.Metadata.IsParentNode { + // handle dynamic node + err = m.parseDynamicNodeExecution(ctx, nodeExecution, &spans, depth-1) + } else { + // handle task node + err = m.parseTaskNodeExecution(ctx, nodeExecution, &spans, depth-1) + } + case *core.Node_WorkflowNode: + switch workflow := target.WorkflowNode.Reference.(type) { + case *core.WorkflowNode_LaunchplanRef: + // handle launch plan + err = m.parseLaunchPlanNodeExecution(ctx, nodeExecution, &spans, depth-1) + case *core.WorkflowNode_SubWorkflowRef: + // handle subworkflow + err = m.parseSubworkflowNodeExecution(ctx, nodeExecution, workflow.SubWorkflowRef, &spans, depth-1) + default: + err = fmt.Errorf("failed to identify workflow node type for node: %+v", target) + } + default: + err = fmt.Errorf("failed to identify node type for node: %+v", target) + } + + if err != nil { + return nil, err + } + } + + return &core.Span{ + StartTime: nodeExecution.Closure.CreatedAt, + EndTime: nodeExecution.Closure.UpdatedAt, + Id: &core.Span_NodeId{ + NodeId: nodeExecution.Id, + }, + Spans: spans, + }, nil +} + +// parseNodeExecutions partitions the node executions into a collection of Categorical and Reference Spans which are +// appended to the provided spans argument. 
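+// Executions are processed in CreatedAt order, start and end nodes are skipped, and a NODE_TRANSITION span is prepended to a node's child spans when an upstream node execution exists.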
+func (m *MetricsManager) parseNodeExecutions(ctx context.Context, nodeExecutions map[string]*admin.NodeExecution, + compiledWorkflowClosure *core.CompiledWorkflowClosure, spans *[]*core.Span, depth int) error { + + // sort node executions + sortedNodeExecutions := make([]*admin.NodeExecution, 0, len(nodeExecutions)) + for _, nodeExecution := range nodeExecutions { + sortedNodeExecutions = append(sortedNodeExecutions, nodeExecution) + } + sort.Slice(sortedNodeExecutions, func(i, j int) bool { + x := sortedNodeExecutions[i].Closure.CreatedAt.AsTime() + y := sortedNodeExecutions[j].Closure.CreatedAt.AsTime() + return x.Before(y) + }) + + // iterate over sorted node executions + for _, nodeExecution := range sortedNodeExecutions { + specNodeID := nodeExecution.Metadata.SpecNodeId + if specNodeID == v1alpha1.StartNodeID || specNodeID == v1alpha1.EndNodeID { + continue + } + + // get node definition from workflow + var node *core.Node + for _, n := range compiledWorkflowClosure.Primary.Template.Nodes { + if n.Id == specNodeID { + node = n + } + } + + if node == nil { + return fmt.Errorf("failed to discover workflow node '%s' in workflow '%+v'", + specNodeID, compiledWorkflowClosure.Primary.Template.Id) + } + + // parse node execution + nodeExecutionSpan, err := m.parseNodeExecution(ctx, nodeExecution, node, depth) + if err != nil { + return err + } + + // prepend nodeExecution spans with node transition time + latestUpstreamNode := m.getLatestUpstreamNodeExecution(specNodeID, + compiledWorkflowClosure.Primary.Connections.Upstream, nodeExecutions) + if latestUpstreamNode != nil { + nodeExecutionSpan.Spans = append([]*core.Span{createOperationSpan(latestUpstreamNode.Closure.UpdatedAt, + nodeExecution.Closure.CreatedAt, nodeTransition)}, nodeExecutionSpan.Spans...) + } + + *spans = append(*spans, nodeExecutionSpan) + } + + return nil +} + +// parseSubworkflowNodeExecution partitions the SubworkflowNode execution into a collection of Categorical and +// Reference Spans which are appended to the provided spans argument.
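+// Child node executions are listed by UniqueParentId and evaluated against the referenced subworkflow's compiled closure.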
+func (m *MetricsManager) parseSubworkflowNodeExecution(ctx context.Context, + nodeExecution *admin.NodeExecution, identifier *core.Identifier, spans *[]*core.Span, depth int) error { + + // retrieve node execution(s) + nodeExecutions, err := m.getNodeExecutions(ctx, admin.NodeExecutionListRequest{ + WorkflowExecutionId: nodeExecution.Id.ExecutionId, + Limit: RequestLimit, + UniqueParentId: nodeExecution.Id.NodeId, + }) + if err != nil { + return err + } + + // check if the subworkflow started + if len(nodeExecutions) == 0 { + *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup)) + } else { + // frontend overhead + startNode := nodeExecutions[v1alpha1.StartNodeID] + *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, startNode.Closure.UpdatedAt, nodeSetup)) + + // retrieve workflow + workflowRequest := admin.ObjectGetRequest{Id: identifier} + workflow, err := m.workflowManager.GetWorkflow(ctx, workflowRequest) + if err != nil { + return err + } + + // node execution(s) + if err := m.parseNodeExecutions(ctx, nodeExecutions, workflow.Closure.CompiledWorkflow, spans, depth); err != nil { + return err + } + + // backend overhead + latestUpstreamNode := m.getLatestUpstreamNodeExecution(v1alpha1.EndNodeID, + workflow.Closure.CompiledWorkflow.Primary.Connections.Upstream, nodeExecutions) + if latestUpstreamNode != nil && !nodeExecution.Closure.UpdatedAt.AsTime().Before(latestUpstreamNode.Closure.UpdatedAt.AsTime()) { + *spans = append(*spans, createOperationSpan(latestUpstreamNode.Closure.UpdatedAt, nodeExecution.Closure.UpdatedAt, nodeTeardown)) + } + } + + return nil +} + +// parseTaskExecution partitions the task execution into a collection of Categorical and Reference Spans which are +// returned as a hierarchical breakdown of the task execution. +func parseTaskExecution(taskExecution *admin.TaskExecution) *core.Span { + spans := make([]*core.Span, 0) + + // check if plugin has started yet + if taskExecution.Closure.StartedAt == nil || reflect.DeepEqual(taskExecution.Closure.StartedAt, emptyTimestamp) { + spans = append(spans, createOperationSpan(taskExecution.Closure.CreatedAt, taskExecution.Closure.UpdatedAt, taskSetup)) + } else { + // frontend overhead + spans = append(spans, createOperationSpan(taskExecution.Closure.CreatedAt, taskExecution.Closure.StartedAt, taskSetup)) + + // check if plugin has completed yet + if taskExecution.Closure.Duration == nil || reflect.DeepEqual(taskExecution.Closure.Duration, emptyDuration) { + spans = append(spans, createOperationSpan(taskExecution.Closure.StartedAt, taskExecution.Closure.UpdatedAt, taskRuntime)) + } else { + // plugin execution + taskEndTime := timestamppb.New(taskExecution.Closure.StartedAt.AsTime().Add(taskExecution.Closure.Duration.AsDuration())) + spans = append(spans, createOperationSpan(taskExecution.Closure.StartedAt, taskEndTime, taskRuntime)) + + // backend overhead + if !taskExecution.Closure.UpdatedAt.AsTime().Before(taskEndTime.AsTime()) { + spans = append(spans, createOperationSpan(taskEndTime, taskExecution.Closure.UpdatedAt, taskTeardown)) + } + } + } + + return &core.Span{ + StartTime: taskExecution.Closure.CreatedAt, + EndTime: taskExecution.Closure.UpdatedAt, + Id: &core.Span_TaskId{ + TaskId: taskExecution.Id, + }, + Spans: spans, + } +} + +// parseTaskExecutions partitions the task executions into a collection of Categorical and Reference Spans which are +// appended to the provided spans argument.
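+// Attempts are sorted by CreatedAt and the gap between consecutive attempts is recorded as a NODE_RESET span.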
+func parseTaskExecutions(taskExecutions []*admin.TaskExecution, spans *[]*core.Span, depth int) { + // sort task executions + sort.Slice(taskExecutions, func(i, j int) bool { + x := taskExecutions[i].Closure.CreatedAt.AsTime() + y := taskExecutions[j].Closure.CreatedAt.AsTime() + return x.Before(y) + }) + + // iterate over task executions + for index, taskExecution := range taskExecutions { + if index > 0 { + *spans = append(*spans, createOperationSpan(taskExecutions[index-1].Closure.UpdatedAt, taskExecution.Closure.CreatedAt, nodeReset)) + } + + if depth != 0 { + *spans = append(*spans, parseTaskExecution(taskExecution)) + } + } +} + +// parseTaskNodeExecution partitions the TaskNode execution into a collection of Categorical and Reference Spans which +// are appended to the provided spans argument. +func (m *MetricsManager) parseTaskNodeExecution(ctx context.Context, nodeExecution *admin.NodeExecution, spans *[]*core.Span, depth int) error { + // retrieve task executions + taskExecutions, err := m.getTaskExecutions(ctx, admin.TaskExecutionListRequest{ + NodeExecutionId: nodeExecution.Id, + Limit: RequestLimit, + }) + if err != nil { + return err + } + + // if no task executions then everything is execution overhead + if len(taskExecutions) == 0 { + *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, nodeExecution.Closure.UpdatedAt, nodeSetup)) + } else { + // frontend overhead + *spans = append(*spans, createOperationSpan(nodeExecution.Closure.CreatedAt, taskExecutions[0].Closure.CreatedAt, nodeSetup)) + + // parse task executions + parseTaskExecutions(taskExecutions, spans, depth) + + // backend overhead + lastTask := taskExecutions[len(taskExecutions)-1] + if !nodeExecution.Closure.UpdatedAt.AsTime().Before(lastTask.Closure.UpdatedAt.AsTime()) { + *spans = append(*spans, createOperationSpan(taskExecutions[len(taskExecutions)-1].Closure.UpdatedAt, + nodeExecution.Closure.UpdatedAt, nodeTeardown)) + } + } + + return nil +} + +// GetExecutionMetrics returns a Span hierarchically breaking down the workflow execution into a collection of +// Categorical and Reference Spans. +func (m *MetricsManager) GetExecutionMetrics(ctx context.Context, + request admin.WorkflowExecutionGetMetricsRequest) (*admin.WorkflowExecutionGetMetricsResponse, error) { + + // retrieve workflow execution + executionRequest := admin.WorkflowExecutionGetRequest{Id: request.Id} + execution, err := m.executionManager.GetExecution(ctx, executionRequest) + if err != nil { + return nil, err + } + + span, err := m.parseExecution(ctx, execution, int(request.Depth)) + if err != nil { + return nil, err + } + + return &admin.WorkflowExecutionGetMetricsResponse{Span: span}, nil +} + +// NewMetricsManager returns a new MetricsManager constructed with the provided arguments.
+func NewMetricsManager( + workflowManager interfaces.WorkflowInterface, + executionManager interfaces.ExecutionInterface, + nodeExecutionManager interfaces.NodeExecutionInterface, + taskExecutionManager interfaces.TaskExecutionInterface, + scope promutils.Scope) interfaces.MetricsInterface { + metrics := metrics{ + Scope: scope, + } + + return &MetricsManager{ + workflowManager: workflowManager, + executionManager: executionManager, + nodeExecutionManager: nodeExecutionManager, + taskExecutionManager: taskExecutionManager, + metrics: metrics, + } +} diff --git a/pkg/manager/impl/metrics_manager_test.go b/pkg/manager/impl/metrics_manager_test.go new file mode 100644 index 000000000..2958285b8 --- /dev/null +++ b/pkg/manager/impl/metrics_manager_test.go @@ -0,0 +1,1082 @@ +package impl + +import ( + "context" + "reflect" + "testing" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/flyteorg/flyteadmin/pkg/manager/interfaces" + "github.com/flyteorg/flyteadmin/pkg/manager/mocks" + + "github.com/golang/protobuf/ptypes/duration" + "github.com/golang/protobuf/ptypes/timestamp" + + "github.com/stretchr/testify/assert" +) + +var ( + baseDuration = &duration.Duration{ + Seconds: 400, + Nanos: 0, + } + baseTimestamp = &timestamp.Timestamp{ + Seconds: 643852800, + Nanos: 0, + } +) + +func addTimestamp(ts *timestamp.Timestamp, seconds int64) *timestamp.Timestamp { + return &timestamp.Timestamp{ + Seconds: ts.Seconds + seconds, + Nanos: ts.Nanos, + } +} + +func getMockExecutionManager(execution *admin.Execution) interfaces.ExecutionInterface { + mockExecutionManager := mocks.MockExecutionManager{} + mockExecutionManager.SetGetCallback( + func(ctx context.Context, request admin.WorkflowExecutionGetRequest) (*admin.Execution, error) { + return execution, nil + }) + + return &mockExecutionManager +} + +func getMockNodeExecutionManager(nodeExecutions []*admin.NodeExecution, + dynamicWorkflow *admin.DynamicWorkflowNodeMetadata) interfaces.NodeExecutionInterface { + + mockNodeExecutionManager := mocks.MockNodeExecutionManager{} + mockNodeExecutionManager.SetListNodeExecutionsFunc( + func(ctx context.Context, request admin.NodeExecutionListRequest) (*admin.NodeExecutionList, error) { + return &admin.NodeExecutionList{ + NodeExecutions: nodeExecutions, + }, nil + }) + mockNodeExecutionManager.SetGetNodeExecutionDataFunc( + func(ctx context.Context, request admin.NodeExecutionGetDataRequest) (*admin.NodeExecutionGetDataResponse, error) { + return &admin.NodeExecutionGetDataResponse{ + DynamicWorkflow: dynamicWorkflow, + }, nil + }) + + return &mockNodeExecutionManager +} + +func getMockTaskExecutionManager(taskExecutions []*admin.TaskExecution) interfaces.TaskExecutionInterface { + mockTaskExecutionManager := mocks.MockTaskExecutionManager{} + mockTaskExecutionManager.SetListTaskExecutionsCallback( + func(ctx context.Context, request admin.TaskExecutionListRequest) (*admin.TaskExecutionList, error) { + return &admin.TaskExecutionList{ + TaskExecutions: taskExecutions, + }, nil + }) + + return &mockTaskExecutionManager +} + +func getMockWorkflowManager(workflow *admin.Workflow) interfaces.WorkflowInterface { + mockWorkflowManager := mocks.MockWorkflowManager{} + mockWorkflowManager.SetGetCallback( + func(ctx context.Context, request admin.ObjectGetRequest) (*admin.Workflow, error) { + return workflow, nil + }) + + return &mockWorkflowManager +} + +func parseSpans(spans []*core.Span) (map[string][]int64, int) { + operationDurations =
make(map[string][]int64) + referenceCount := 0 + for _, span := range spans { + switch id := span.Id.(type) { + case *core.Span_OperationId: + operationID := id.OperationId + duration := span.EndTime.Seconds - span.StartTime.Seconds + if array, exists := operationDurations[operationID]; exists { + operationDurations[operationID] = append(array, duration) + } else { + operationDurations[operationID] = []int64{duration} + } + default: + referenceCount++ + } + } + + return operationDurations, referenceCount +} + +func TestParseBranchNodeExecution(t *testing.T) { + tests := []struct { + name string + nodeExecution *admin.NodeExecution + nodeExecutions []*admin.NodeExecution + operationDurations map[string][]int64 + referenceCount int + }{ + { + "NotStarted", + &admin.NodeExecution{ + Id: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{}, + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 5), + }, + }, + nil, + map[string][]int64{ + nodeSetup: []int64{5}, + }, + 0, + }, + { + "Running", + &admin.NodeExecution{ + Id: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{}, + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: baseTimestamp, + }, + }, + []*admin.NodeExecution{ + &admin.NodeExecution{ + Metadata: &admin.NodeExecutionMetaData{ + SpecNodeId: "foo", + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 10), + StartedAt: addTimestamp(baseTimestamp, 15), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 430), + }, + }, + }, + map[string][]int64{ + nodeSetup: []int64{10}, + }, + 1, + }, + { + "Completed", + &admin.NodeExecution{ + Id: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{}, + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 450), + }, + }, + []*admin.NodeExecution{ + &admin.NodeExecution{ + Metadata: &admin.NodeExecutionMetaData{ + SpecNodeId: "foo", + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 10), + StartedAt: addTimestamp(baseTimestamp, 15), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 430), + }, + }, + }, + map[string][]int64{ + nodeSetup: []int64{10}, + nodeTeardown: []int64{20}, + }, + 1, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // initialize mocks + mockNodeExecutionManager := getMockNodeExecutionManager(test.nodeExecutions, nil) + mockTaskExecutionManager := getMockTaskExecutionManager([]*admin.TaskExecution{}) + metricsManager := MetricsManager{ + nodeExecutionManager: mockNodeExecutionManager, + taskExecutionManager: mockTaskExecutionManager, + } + + // parse node execution + branchNode := &core.BranchNode{ + IfElse: &core.IfElseBlock{ + Case: &core.IfBlock{ + ThenNode: &core.Node{ + Id: "bar", + }, + }, + Other: []*core.IfBlock{ + &core.IfBlock{ + ThenNode: &core.Node{ + Id: "baz", + }, + }, + }, + Default: &core.IfElseBlock_ElseNode{ + ElseNode: &core.Node{ + Id: "foo", + Target: &core.Node_TaskNode{}, + }, + }, + }, + } + + spans := make([]*core.Span, 0) + err := metricsManager.parseBranchNodeExecution(context.TODO(), test.nodeExecution, branchNode, &spans, -1) + assert.Nil(t, err) + + // validate spans 
+ operationDurations, referenceCount := parseSpans(spans) + assert.True(t, reflect.DeepEqual(test.operationDurations, operationDurations)) + assert.Equal(t, test.referenceCount, referenceCount) + }) + } +} + +func TestParseDynamicNodeExecution(t *testing.T) { + tests := []struct { + name string + nodeExecution *admin.NodeExecution + taskExecutions []*admin.TaskExecution + nodeExecutions []*admin.NodeExecution + operationDurations map[string][]int64 + referenceCount int + }{ + { + "NotStarted", + &admin.NodeExecution{ + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 5), + }, + }, + nil, + nil, + map[string][]int64{ + nodeSetup: []int64{5}, + }, + 0, + }, + { + "TaskRunning", + &admin.NodeExecution{ + Id: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{}, + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: baseTimestamp, + }, + }, + []*admin.TaskExecution{ + &admin.TaskExecution{ + Closure: &admin.TaskExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 10), + StartedAt: addTimestamp(baseTimestamp, 15), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 420), + }, + }, + }, + nil, + map[string][]int64{ + nodeSetup: []int64{10}, + }, + 1, + }, + { + "NodesRunning", + &admin.NodeExecution{ + Id: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{}, + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: baseTimestamp, + }, + }, + []*admin.TaskExecution{ + &admin.TaskExecution{ + Closure: &admin.TaskExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 10), + StartedAt: addTimestamp(baseTimestamp, 15), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 420), + }, + }, + }, + []*admin.NodeExecution{ + &admin.NodeExecution{ + Metadata: &admin.NodeExecutionMetaData{ + SpecNodeId: "start-node", + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 435), + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 435), + }, + }, + &admin.NodeExecution{ + Metadata: &admin.NodeExecutionMetaData{ + SpecNodeId: "foo", + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 445), + StartedAt: addTimestamp(baseTimestamp, 460), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 880), + }, + }, + }, + map[string][]int64{ + nodeSetup: []int64{10}, + nodeReset: []int64{15}, + }, + 2, + }, + { + "Completed", + &admin.NodeExecution{ + Id: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{}, + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 900), + }, + }, + []*admin.TaskExecution{ + &admin.TaskExecution{ + Closure: &admin.TaskExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 10), + StartedAt: addTimestamp(baseTimestamp, 15), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 420), + }, + }, + }, + []*admin.NodeExecution{ + &admin.NodeExecution{ + Metadata: &admin.NodeExecutionMetaData{ + SpecNodeId: "start-node", + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 435), + StartedAt: emptyTimestamp, + 
Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 435), + }, + }, + &admin.NodeExecution{ + Metadata: &admin.NodeExecutionMetaData{ + SpecNodeId: "foo", + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 445), + StartedAt: addTimestamp(baseTimestamp, 460), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 880), + }, + }, + }, + map[string][]int64{ + nodeSetup: []int64{10}, + nodeReset: []int64{15}, + nodeTeardown: []int64{20}, + }, + 2, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // initialize mocks + mockNodeExecutionManager := getMockNodeExecutionManager( + test.nodeExecutions, + &admin.DynamicWorkflowNodeMetadata{ + CompiledWorkflow: &core.CompiledWorkflowClosure{ + Primary: &core.CompiledWorkflow{ + Connections: &core.ConnectionSet{ + Upstream: map[string]*core.ConnectionSet_IdList{ + "foo": &core.ConnectionSet_IdList{ + Ids: []string{"start-node"}, + }, + "end-node": &core.ConnectionSet_IdList{ + Ids: []string{"foo"}, + }, + }, + }, + Template: &core.WorkflowTemplate{ + Nodes: []*core.Node{ + &core.Node{ + Id: "foo", + Target: &core.Node_TaskNode{}, + }, + }, + }, + }, + }, + }) + mockTaskExecutionManager := getMockTaskExecutionManager(test.taskExecutions) + metricsManager := MetricsManager{ + nodeExecutionManager: mockNodeExecutionManager, + taskExecutionManager: mockTaskExecutionManager, + } + + // parse node execution + spans := make([]*core.Span, 0) + err := metricsManager.parseDynamicNodeExecution(context.TODO(), test.nodeExecution, &spans, -1) + assert.Nil(t, err) + + // validate spans + operationDurations, referenceCount := parseSpans(spans) + assert.True(t, reflect.DeepEqual(test.operationDurations, operationDurations)) + assert.Equal(t, test.referenceCount, referenceCount) + }) + } +} + +func TestParseGateNodeExecution(t *testing.T) { + tests := []struct { + name string + nodeExecution *admin.NodeExecution + operationDurations map[string][]int64 + }{ + { + "NotStarted", + &admin.NodeExecution{ + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 5), + }, + }, + map[string][]int64{ + nodeSetup: []int64{5}, + }, + }, + { + "Running", + &admin.NodeExecution{ + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: addTimestamp(baseTimestamp, 10), + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 15), + }, + }, + map[string][]int64{ + nodeSetup: []int64{10}, + nodeIdle: []int64{5}, + }, + }, + { + "Completed", + &admin.NodeExecution{ + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: addTimestamp(baseTimestamp, 10), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 425), + }, + }, + map[string][]int64{ + nodeSetup: []int64{10}, + nodeIdle: []int64{400}, + nodeTeardown: []int64{15}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // initialize mocks + metricsManager := MetricsManager{} + + // parse node execution + spans := make([]*core.Span, 0) + metricsManager.parseGateNodeExecution(context.TODO(), test.nodeExecution, &spans) + + // validate spans + operationDurations, _ := parseSpans(spans) + assert.True(t, reflect.DeepEqual(test.operationDurations, operationDurations)) + }) + } +} + +func TestParseLaunchPlanNodeExecution(t *testing.T) { + tests := []struct { + name string + nodeExecution *admin.NodeExecution + execution 
*admin.Execution + operationDurations map[string][]int64 + referenceCount int + }{ + { + "NotStarted", + &admin.NodeExecution{ + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 5), + }, + }, + nil, + map[string][]int64{ + nodeSetup: []int64{5}, + }, + 0, + }, + { + "Running", + &admin.NodeExecution{ + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: baseTimestamp, + TargetMetadata: &admin.NodeExecutionClosure_WorkflowNodeMetadata{ + WorkflowNodeMetadata: &admin.WorkflowNodeMetadata{ + ExecutionId: &core.WorkflowExecutionIdentifier{}, + }, + }, + }, + }, + &admin.Execution{ + Closure: &admin.ExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 10), + StartedAt: addTimestamp(baseTimestamp, 15), + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 15), + }, + }, + map[string][]int64{ + nodeSetup: []int64{10}, + }, + 1, + }, + { + "Completed", + &admin.NodeExecution{ + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 440), + TargetMetadata: &admin.NodeExecutionClosure_WorkflowNodeMetadata{ + WorkflowNodeMetadata: &admin.WorkflowNodeMetadata{ + ExecutionId: &core.WorkflowExecutionIdentifier{}, + }, + }, + }, + }, + &admin.Execution{ + Closure: &admin.ExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 10), + StartedAt: addTimestamp(baseTimestamp, 15), + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 425), + }, + }, + map[string][]int64{ + nodeSetup: []int64{10}, + nodeTeardown: []int64{15}, + }, + 1, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // initialize mocks + mockExecutionManager := getMockExecutionManager(test.execution) + mockNodeExecutionManager := getMockNodeExecutionManager( + []*admin.NodeExecution{ + &admin.NodeExecution{ + Metadata: &admin.NodeExecutionMetaData{ + SpecNodeId: "start-node", + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 10), + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 10), + }, + }, + &admin.NodeExecution{ + Metadata: &admin.NodeExecutionMetaData{ + SpecNodeId: "foo", + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 15), + StartedAt: addTimestamp(baseTimestamp, 20), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 435), + }, + }, + }, nil) + mockTaskExecutionManager := getMockTaskExecutionManager([]*admin.TaskExecution{}) + mockWorkflowManager := getMockWorkflowManager( + &admin.Workflow{ + Closure: &admin.WorkflowClosure{ + CompiledWorkflow: &core.CompiledWorkflowClosure{ + Primary: &core.CompiledWorkflow{ + Connections: &core.ConnectionSet{ + Upstream: map[string]*core.ConnectionSet_IdList{ + "foo": &core.ConnectionSet_IdList{ + Ids: []string{"start-node"}, + }, + "end-node": &core.ConnectionSet_IdList{ + Ids: []string{"foo"}, + }, + }, + }, + Template: &core.WorkflowTemplate{ + Nodes: []*core.Node{ + &core.Node{ + Id: "foo", + Target: &core.Node_TaskNode{}, + }, + }, + }, + }, + }, + }, + }) + metricsManager := MetricsManager{ + executionManager: mockExecutionManager, + nodeExecutionManager: mockNodeExecutionManager, + taskExecutionManager: mockTaskExecutionManager, + workflowManager: mockWorkflowManager, + } + + // parse 
node execution + spans := make([]*core.Span, 0) + err := metricsManager.parseLaunchPlanNodeExecution(context.TODO(), test.nodeExecution, &spans, -1) + assert.Nil(t, err) + + // validate spans + operationDurations, referenceCount := parseSpans(spans) + assert.True(t, reflect.DeepEqual(test.operationDurations, operationDurations)) + assert.Equal(t, test.referenceCount, referenceCount) + }) + } +} + +func TestParseSubworkflowNodeExecution(t *testing.T) { + tests := []struct { + name string + nodeExecution *admin.NodeExecution + nodeExecutions []*admin.NodeExecution + operationDurations map[string][]int64 + referenceCount int + }{ + { + "NotStarted", + &admin.NodeExecution{ + Id: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{}, + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 5), + }, + }, + nil, + map[string][]int64{ + nodeSetup: []int64{5}, + }, + 0, + }, + { + "Running", + &admin.NodeExecution{ + Id: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{}, + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: baseTimestamp, + }, + }, + []*admin.NodeExecution{ + &admin.NodeExecution{ + Metadata: &admin.NodeExecutionMetaData{ + SpecNodeId: "start-node", + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 10), + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 10), + }, + }, + &admin.NodeExecution{ + Metadata: &admin.NodeExecutionMetaData{ + SpecNodeId: "foo", + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 15), + StartedAt: addTimestamp(baseTimestamp, 20), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 435), + }, + }, + }, + map[string][]int64{ + nodeSetup: []int64{10}, + }, + 1, + }, + { + "Completed", + &admin.NodeExecution{ + Id: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{}, + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 455), + }, + }, + []*admin.NodeExecution{ + &admin.NodeExecution{ + Metadata: &admin.NodeExecutionMetaData{ + SpecNodeId: "start-node", + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 10), + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 10), + }, + }, + &admin.NodeExecution{ + Metadata: &admin.NodeExecutionMetaData{ + SpecNodeId: "foo", + }, + Closure: &admin.NodeExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 15), + StartedAt: addTimestamp(baseTimestamp, 20), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 435), + }, + }, + }, + map[string][]int64{ + nodeSetup: []int64{10}, + nodeTeardown: []int64{20}, + }, + 1, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // initialize mocks + mockNodeExecutionManager := getMockNodeExecutionManager(test.nodeExecutions, nil) + mockTaskExecutionManager := getMockTaskExecutionManager([]*admin.TaskExecution{}) + mockWorkflowManager := getMockWorkflowManager( + &admin.Workflow{ + Closure: &admin.WorkflowClosure{ + CompiledWorkflow: &core.CompiledWorkflowClosure{ + Primary: &core.CompiledWorkflow{ + Connections: &core.ConnectionSet{ + 
Upstream: map[string]*core.ConnectionSet_IdList{ + "foo": &core.ConnectionSet_IdList{ + Ids: []string{"start-node"}, + }, + "end-node": &core.ConnectionSet_IdList{ + Ids: []string{"foo"}, + }, + }, + }, + Template: &core.WorkflowTemplate{ + Nodes: []*core.Node{ + &core.Node{ + Id: "foo", + Target: &core.Node_TaskNode{}, + }, + }, + }, + }, + }, + }, + }) + metricsManager := MetricsManager{ + nodeExecutionManager: mockNodeExecutionManager, + taskExecutionManager: mockTaskExecutionManager, + workflowManager: mockWorkflowManager, + } + + // parse node execution + spans := make([]*core.Span, 0) + err := metricsManager.parseSubworkflowNodeExecution(context.TODO(), test.nodeExecution, &core.Identifier{}, &spans, -1) + assert.Nil(t, err) + + // validate spans + operationDurations, referenceCount := parseSpans(spans) + assert.True(t, reflect.DeepEqual(test.operationDurations, operationDurations)) + assert.Equal(t, test.referenceCount, referenceCount) + }) + } +} + +func TestParseTaskExecution(t *testing.T) { + tests := []struct { + name string + taskExecution *admin.TaskExecution + operationDurations map[string][]int64 + }{ + { + "NotStarted", + &admin.TaskExecution{ + Closure: &admin.TaskExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 5), + }, + }, + map[string][]int64{ + taskSetup: []int64{5}, + }, + }, + { + "Running", + &admin.TaskExecution{ + Closure: &admin.TaskExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: addTimestamp(baseTimestamp, 5), + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 605), + }, + }, + map[string][]int64{ + taskSetup: []int64{5}, + taskRuntime: []int64{600}, + }, + }, + { + "Completed", + &admin.TaskExecution{ + Closure: &admin.TaskExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: addTimestamp(baseTimestamp, 5), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 415), + }, + }, + map[string][]int64{ + taskSetup: []int64{5}, + taskRuntime: []int64{400}, + taskTeardown: []int64{10}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // parse task execution + span := parseTaskExecution(test.taskExecution) + _, ok := span.Id.(*core.Span_TaskId) + assert.True(t, ok) + + // validate spans + operationDurations, referenceCount := parseSpans(span.Spans) + assert.True(t, reflect.DeepEqual(test.operationDurations, operationDurations)) + assert.Equal(t, 0, referenceCount) + }) + } +} + +func TestParseTaskExecutions(t *testing.T) { + tests := []struct { + name string + taskExecutions []*admin.TaskExecution + operationDurations map[string][]int64 + referenceCount int + }{ + { + "SingleAttempt", + []*admin.TaskExecution{ + &admin.TaskExecution{ + Closure: &admin.TaskExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: addTimestamp(baseTimestamp, 5), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 415), + }, + }, + }, + map[string][]int64{}, + 1, + }, + { + "MultipleAttempts", + []*admin.TaskExecution{ + &admin.TaskExecution{ + Closure: &admin.TaskExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: addTimestamp(baseTimestamp, 5), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 605), + }, + }, + &admin.TaskExecution{ + Closure: &admin.TaskExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 625), + StartedAt: addTimestamp(baseTimestamp, 630), + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 630), + }, + }, + }, + 
map[string][]int64{ + nodeReset: []int64{20}, + }, + 2, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // parse task executions + spans := make([]*core.Span, 0) + parseTaskExecutions(test.taskExecutions, &spans, -1) + + // validate spans + operationDurations, referenceCount := parseSpans(spans) + assert.True(t, reflect.DeepEqual(test.operationDurations, operationDurations)) + assert.Equal(t, test.referenceCount, referenceCount) + }) + } +} + +func TestParseTaskNodeExecution(t *testing.T) { + tests := []struct { + name string + nodeExecution *admin.NodeExecution + taskExecutions []*admin.TaskExecution + operationDurations map[string][]int64 + referenceCount int + }{ + { + "NotStarted", + &admin.NodeExecution{ + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 5), + }, + }, + nil, + map[string][]int64{ + nodeSetup: []int64{5}, + }, + 0, + }, + { + "Running", + &admin.NodeExecution{ + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 10), + }, + }, + []*admin.TaskExecution{ + &admin.TaskExecution{ + Closure: &admin.TaskExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 10), + StartedAt: addTimestamp(baseTimestamp, 15), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 420), + }, + }, + }, + map[string][]int64{ + nodeSetup: []int64{10}, + }, + 1, + }, + { + "Completed", + &admin.NodeExecution{ + Closure: &admin.NodeExecutionClosure{ + CreatedAt: baseTimestamp, + StartedAt: emptyTimestamp, + Duration: emptyDuration, + UpdatedAt: addTimestamp(baseTimestamp, 435), + }, + }, + []*admin.TaskExecution{ + &admin.TaskExecution{ + Closure: &admin.TaskExecutionClosure{ + CreatedAt: addTimestamp(baseTimestamp, 10), + StartedAt: addTimestamp(baseTimestamp, 15), + Duration: baseDuration, + UpdatedAt: addTimestamp(baseTimestamp, 420), + }, + }, + }, + map[string][]int64{ + nodeSetup: []int64{10}, + nodeTeardown: []int64{15}, + }, + 1, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // initialize mocks + mockTaskExecutionManager := getMockTaskExecutionManager(test.taskExecutions) + metricsManager := MetricsManager{ + taskExecutionManager: mockTaskExecutionManager, + } + + // parse node execution + spans := make([]*core.Span, 0) + err := metricsManager.parseTaskNodeExecution(context.TODO(), test.nodeExecution, &spans, -1) + assert.Nil(t, err) + + // validate spans + operationDurations, referenceCount := parseSpans(spans) + assert.True(t, reflect.DeepEqual(test.operationDurations, operationDurations)) + assert.Equal(t, test.referenceCount, referenceCount) + }) + } +} diff --git a/pkg/manager/impl/node_execution_manager.go b/pkg/manager/impl/node_execution_manager.go index ca6d71e03..ae5a7bb40 100644 --- a/pkg/manager/impl/node_execution_manager.go +++ b/pkg/manager/impl/node_execution_manager.go @@ -308,7 +308,7 @@ func (m *NodeExecutionManager) CreateNodeEvent(ctx context.Context, request admi // Handles making additional database calls, if necessary, to populate IsParent & IsDynamic data using the historical pattern of // preloading child node executions. Otherwise, simply calls transform on the input model. 
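+// opts is forwarded to transformers.FromNodeExecutionModel; GetNodeExecution passes nil, while the list path passes transformers.ListExecutionTransformerOptions, presumably to trim long error messages in list responses.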
func (m *NodeExecutionManager) transformNodeExecutionModel(ctx context.Context, nodeExecutionModel models.NodeExecution, - nodeExecutionID *core.NodeExecutionIdentifier) (*admin.NodeExecution, error) { + nodeExecutionID *core.NodeExecutionIdentifier, opts *transformers.ExecutionTransformerOptions) (*admin.NodeExecution, error) { internalData, err := transformers.GetNodeExecutionInternalData(nodeExecutionModel.InternalData) if err != nil { return nil, err @@ -323,7 +323,7 @@ func (m *NodeExecutionManager) transformNodeExecutionModel(ctx context.Context, } } - nodeExecution, err := transformers.FromNodeExecutionModel(nodeExecutionModel) + nodeExecution, err := transformers.FromNodeExecutionModel(nodeExecutionModel, opts) if err != nil { logger.Debugf(ctx, "failed to transform node execution model [%+v] to proto with err: %v", nodeExecutionID, err) return nil, err @@ -341,7 +341,7 @@ func (m *NodeExecutionManager) transformNodeExecutionModelList(ctx context.Conte Name: nodeExecutionModel.Name, }, NodeId: nodeExecutionModel.NodeID, - }) + }, transformers.ListExecutionTransformerOptions) if err != nil { return nil, err } @@ -362,7 +362,7 @@ func (m *NodeExecutionManager) GetNodeExecution( request.Id, err) return nil, err } - nodeExecution, err := m.transformNodeExecutionModel(ctx, *nodeExecutionModel, request.Id) + nodeExecution, err := m.transformNodeExecutionModel(ctx, *nodeExecutionModel, request.Id, nil) if err != nil { return nil, err } @@ -498,7 +498,7 @@ func (m *NodeExecutionManager) GetNodeExecutionData( return nil, err } - nodeExecution, err := transformers.FromNodeExecutionModel(*nodeExecutionModel) + nodeExecution, err := transformers.FromNodeExecutionModel(*nodeExecutionModel, transformers.DefaultExecutionTransformerOptions) if err != nil { logger.Debugf(ctx, "failed to transform node execution model [%+v] when fetching data: %v", request.Id, err) return nil, err @@ -537,8 +537,9 @@ func (m *NodeExecutionManager) GetNodeExecutionData( return nil, errors.NewFlyteAdminErrorf(codes.Internal, "Empty primary workflow template in loaded dynamic workflow model.") } else { response.DynamicWorkflow = &admin.DynamicWorkflowNodeMetadata{ - Id: closure.Primary.Template.Id, - CompiledWorkflow: closure, + Id: closure.Primary.Template.Id, + CompiledWorkflow: closure, + DynamicJobSpecUri: nodeExecution.Closure.DynamicJobSpecUri, } } } diff --git a/pkg/manager/impl/node_execution_manager_test.go b/pkg/manager/impl/node_execution_manager_test.go index 104a2c9ac..a1c43c36b 100644 --- a/pkg/manager/impl/node_execution_manager_test.go +++ b/pkg/manager/impl/node_execution_manager_test.go @@ -7,6 +7,8 @@ import ( "testing" "time" + "github.com/flyteorg/flyteadmin/pkg/repositories/transformers" + "github.com/flyteorg/flyteadmin/pkg/manager/impl/util" genModel "github.com/flyteorg/flyteadmin/pkg/repositories/gen/models" @@ -64,7 +66,9 @@ var request = admin.NodeExecutionEventRequest{ }, OccurredAt: occurredAtProto, Phase: core.NodeExecution_RUNNING, - InputUri: "input uri", + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: "input uri", + }, TargetMetadata: &event.NodeExecutionEvent_TaskNodeMetadata{ TaskNodeMetadata: &event.TaskNodeMetadata{ DynamicWorkflow: &event.DynamicWorkflowNodeMetadata{ @@ -397,7 +401,9 @@ func TestCreateNodeEvent_FirstEventIsTerminal(t *testing.T) { }, OccurredAt: occurredAtProto, Phase: core.NodeExecution_SUCCEEDED, - InputUri: "input uri", + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: "input uri", + }, }, } mockDbEventWriter := 
&eventWriterMocks.NodeExecutionEventWriter{} @@ -443,7 +449,7 @@ func TestTransformNodeExecutionModel(t *testing.T) { manager := NodeExecutionManager{ db: repository, } - nodeExecution, err := manager.transformNodeExecutionModel(ctx, models.NodeExecution{}, nodeExecID) + nodeExecution, err := manager.transformNodeExecutionModel(ctx, models.NodeExecution{}, nodeExecID, transformers.DefaultExecutionTransformerOptions) assert.NoError(t, err) assert.True(t, proto.Equal(nodeExecID, nodeExecution.Id)) assert.True(t, nodeExecution.Metadata.IsParentNode) @@ -473,7 +479,7 @@ func TestTransformNodeExecutionModel(t *testing.T) { Closure: closureBytes, NodeExecutionMetadata: nodeExecutionMetadataBytes, InternalData: internalDataBytes, - }, nodeExecID) + }, nodeExecID, transformers.DefaultExecutionTransformerOptions) assert.NoError(t, err) assert.True(t, nodeExecution.Metadata.IsParentNode) assert.True(t, nodeExecution.Metadata.IsDynamic) @@ -484,7 +490,7 @@ func TestTransformNodeExecutionModel(t *testing.T) { } _, err := manager.transformNodeExecutionModel(ctx, models.NodeExecution{ InternalData: []byte("i'm invalid"), - }, nodeExecID) + }, nodeExecID, transformers.DefaultExecutionTransformerOptions) assert.NotNil(t, err) assert.Equal(t, err.(flyteAdminErrors.FlyteAdminError).Code(), codes.Internal) }) @@ -499,7 +505,7 @@ func TestTransformNodeExecutionModel(t *testing.T) { manager := NodeExecutionManager{ db: repository, } - _, err := manager.transformNodeExecutionModel(ctx, models.NodeExecution{}, nodeExecID) + _, err := manager.transformNodeExecutionModel(ctx, models.NodeExecution{}, nodeExecID, transformers.DefaultExecutionTransformerOptions) assert.Equal(t, err, expectedErr) }) } diff --git a/pkg/manager/impl/resources/resource_manager.go b/pkg/manager/impl/resources/resource_manager.go index a02cd024a..d65658991 100644 --- a/pkg/manager/impl/resources/resource_manager.go +++ b/pkg/manager/impl/resources/resource_manager.go @@ -213,9 +213,8 @@ func (m *ResourceManager) GetProjectAttributes(ctx context.Context, request admi configLevelDefaults := m.config.GetTopLevelConfig().GetAsWorkflowExecutionConfig() if err != nil { ec, ok := err.(errors.FlyteAdminError) - if ok && ec.Code() == codes.NotFound { + if ok && ec.Code() == codes.NotFound && request.ResourceType == admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG { // TODO: Will likely be removed after overarching settings project is done - // Proceed with the default CreateOrUpdate call since there's no existing model to update. 
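+ // Only WORKFLOW_EXECUTION_CONFIG lookups fall back to the config-level workflow execution defaults on NotFound; other resource types now surface the NotFound error, as the new "config not merged if not wec" test below asserts.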
return &admin.ProjectAttributesGetResponse{ Attributes: &admin.ProjectAttributes{ Project: request.Project, diff --git a/pkg/manager/impl/resources/resource_manager_test.go b/pkg/manager/impl/resources/resource_manager_test.go index ad886e52e..8c587937c 100644 --- a/pkg/manager/impl/resources/resource_manager_test.go +++ b/pkg/manager/impl/resources/resource_manager_test.go @@ -645,6 +645,26 @@ func TestGetProjectAttributes_ConfigLookup(t *testing.T) { }, }, response)) }) + + t.Run("config not merged if not wec", func(t *testing.T) { + appConfig := runtimeInterfaces.ApplicationConfig{ + MaxParallelism: 3, + K8SServiceAccount: "testserviceaccount", + Labels: map[string]string{"lab1": "name"}, + OutputLocationPrefix: "s3://test-bucket", + } + config.SetTopLevelConfig(appConfig) + request := admin.ProjectAttributesGetRequest{ + Project: project, + ResourceType: admin.MatchableResource_EXECUTION_QUEUE, + } + + _, err := manager.GetProjectAttributes(context.Background(), request) + assert.Error(t, err) + ec, ok := err.(errors.FlyteAdminError) + assert.True(t, ok) + assert.Equal(t, codes.NotFound, ec.Code()) + }) } func TestDeleteProjectAttributes(t *testing.T) { diff --git a/pkg/manager/impl/shared/iface.go b/pkg/manager/impl/shared/iface.go index 8cf83882b..7baae65a1 100644 --- a/pkg/manager/impl/shared/iface.go +++ b/pkg/manager/impl/shared/iface.go @@ -22,4 +22,6 @@ type WorkflowExecutionConfigInterface interface { GetLabels() *admin.Labels // GetInterruptible indicates a workflow should be flagged as interruptible for a single execution. If omitted, the workflow's default is used. GetInterruptible() *wrappers.BoolValue + // GetOverwriteCache indicates a workflow should skip all its cached results and re-compute its output, overwriting any already stored data. 
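+ // MergeIntoExecConfig (pkg/manager/impl/util/shared.go) copies the spec's OverwriteCache into the higher-priority config only when the config leaves it false and the spec sets it.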
+ GetOverwriteCache() bool } diff --git a/pkg/manager/impl/signal_manager.go b/pkg/manager/impl/signal_manager.go new file mode 100644 index 000000000..df2fbcc7b --- /dev/null +++ b/pkg/manager/impl/signal_manager.go @@ -0,0 +1,160 @@ +package impl + +import ( + "context" + "strconv" + + "github.com/flyteorg/flytestdlib/contextutils" + + "github.com/flyteorg/flyteadmin/pkg/common" + "github.com/flyteorg/flyteadmin/pkg/errors" + "github.com/flyteorg/flyteadmin/pkg/manager/impl/util" + "github.com/flyteorg/flyteadmin/pkg/manager/impl/validation" + "github.com/flyteorg/flyteadmin/pkg/manager/interfaces" + repoInterfaces "github.com/flyteorg/flyteadmin/pkg/repositories/interfaces" + "github.com/flyteorg/flyteadmin/pkg/repositories/transformers" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/flyteorg/flytestdlib/logger" + "github.com/flyteorg/flytestdlib/promutils" + "github.com/flyteorg/flytestdlib/promutils/labeled" + + "google.golang.org/grpc/codes" +) + +type signalMetrics struct { + Scope promutils.Scope + Set labeled.Counter +} + +type SignalManager struct { + db repoInterfaces.Repository + metrics signalMetrics +} + +func getSignalContext(ctx context.Context, identifier *core.SignalIdentifier) context.Context { + ctx = contextutils.WithProjectDomain(ctx, identifier.ExecutionId.Project, identifier.ExecutionId.Domain) + ctx = contextutils.WithWorkflowID(ctx, identifier.ExecutionId.Name) + return contextutils.WithSignalID(ctx, identifier.SignalId) +} + +func (s *SignalManager) GetOrCreateSignal(ctx context.Context, request admin.SignalGetOrCreateRequest) (*admin.Signal, error) { + if err := validation.ValidateSignalGetOrCreateRequest(ctx, request); err != nil { + logger.Debugf(ctx, "invalid request [%+v]: %v", request, err) + return nil, err + } + ctx = getSignalContext(ctx, request.Id) + + signalModel, err := transformers.CreateSignalModel(request.Id, request.Type, nil) + if err != nil { + logger.Errorf(ctx, "Failed to transform signal with id [%+v] and type [+%v] with err: %v", request.Id, request.Type, err) + return nil, err + } + + err = s.db.SignalRepo().GetOrCreate(ctx, &signalModel) + if err != nil { + return nil, err + } + + signal, err := transformers.FromSignalModel(signalModel) + if err != nil { + logger.Errorf(ctx, "Failed to transform signal model [%+v] with err: %v", signalModel, err) + return nil, err + } + + return &signal, nil +} + +func (s *SignalManager) ListSignals(ctx context.Context, request admin.SignalListRequest) (*admin.SignalList, error) { + if err := validation.ValidateSignalListRequest(ctx, request); err != nil { + logger.Debugf(ctx, "ListSignals request [%+v] is invalid: %v", request, err) + return nil, err + } + ctx = getExecutionContext(ctx, request.WorkflowExecutionId) + + identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters(ctx, *request.WorkflowExecutionId) + if err != nil { + return nil, err + } + + filters, err := util.AddRequestFilters(request.Filters, common.Signal, identifierFilters) + if err != nil { + return nil, err + } + var sortParameter common.SortParameter + if request.SortBy != nil { + sortParameter, err = common.NewSortParameter(*request.SortBy) + if err != nil { + return nil, err + } + } + + offset, err := validation.ValidateToken(request.Token) + if err != nil { + return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, + "invalid pagination token %s for ListSignals", request.Token) + } + + signalModelList, err := 
s.db.SignalRepo().List(ctx, repoInterfaces.ListResourceInput{ + InlineFilters: filters, + Offset: offset, + Limit: int(request.Limit), + SortParameter: sortParameter, + }) + if err != nil { + logger.Debugf(ctx, "Failed to list signals with request [%+v] with err %v", + request, err) + return nil, err + } + + signalList, err := transformers.FromSignalModels(signalModelList) + if err != nil { + logger.Debugf(ctx, "failed to transform signal models for request [%+v] with err: %v", request, err) + return nil, err + } + var token string + if len(signalList) == int(request.Limit) { + token = strconv.Itoa(offset + len(signalList)) + } + return &admin.SignalList{ + Signals: signalList, + Token: token, + }, nil +} + +func (s *SignalManager) SetSignal(ctx context.Context, request admin.SignalSetRequest) (*admin.SignalSetResponse, error) { + if err := validation.ValidateSignalSetRequest(ctx, s.db, request); err != nil { + return nil, err + } + ctx = getSignalContext(ctx, request.Id) + + signalModel, err := transformers.CreateSignalModel(request.Id, nil, request.Value) + if err != nil { + logger.Errorf(ctx, "Failed to transform signal with id [%+v] and value [+%v] with err: %v", request.Id, request.Value, err) + return nil, err + } + + err = s.db.SignalRepo().Update(ctx, signalModel.SignalKey, signalModel.Value) + if err != nil { + return nil, err + } + + s.metrics.Set.Inc(ctx) + return &admin.SignalSetResponse{}, nil +} + +func NewSignalManager( + db repoInterfaces.Repository, + scope promutils.Scope) interfaces.SignalInterface { + metrics := signalMetrics{ + Scope: scope, + Set: labeled.NewCounter("num_set", "count of set signals", scope), + } + + return &SignalManager{ + db: db, + metrics: metrics, + } +} diff --git a/pkg/manager/impl/signal_manager_test.go b/pkg/manager/impl/signal_manager_test.go new file mode 100644 index 000000000..cc01f07fe --- /dev/null +++ b/pkg/manager/impl/signal_manager_test.go @@ -0,0 +1,241 @@ +package impl + +import ( + "context" + "errors" + "testing" + + repositoryMocks "github.com/flyteorg/flyteadmin/pkg/repositories/mocks" + "github.com/flyteorg/flyteadmin/pkg/repositories/models" + "github.com/flyteorg/flyteadmin/pkg/repositories/transformers" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + + mockScope "github.com/flyteorg/flytestdlib/promutils" + + "github.com/golang/protobuf/proto" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var ( + signalID = &core.SignalIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + SignalId: "signal", + } + + signalType = &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_BOOLEAN, + }, + } + + signalValue = &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Boolean{ + Boolean: false, + }, + }, + }, + }, + }, + } +) + +func TestGetOrCreateSignal(t *testing.T) { + t.Run("Happy", func(t *testing.T) { + mockRepository := repositoryMocks.NewMockRepository() + mockRepository.SignalRepo().(*repositoryMocks.SignalRepoInterface).OnGetOrCreateMatch(mock.Anything, mock.Anything).Return(nil) + + signalManager := NewSignalManager(mockRepository, mockScope.NewTestScope()) + request := admin.SignalGetOrCreateRequest{ + Id: signalID, + Type: signalType, + } + + response, err := signalManager.GetOrCreateSignal(context.Background(), 
request) + assert.NoError(t, err) + + assert.True(t, proto.Equal(&admin.Signal{ + Id: signalID, + Type: signalType, + }, response)) + }) + + t.Run("ValidationError", func(t *testing.T) { + mockRepository := repositoryMocks.NewMockRepository() + signalManager := NewSignalManager(mockRepository, mockScope.NewTestScope()) + request := admin.SignalGetOrCreateRequest{ + Type: signalType, + } + + _, err := signalManager.GetOrCreateSignal(context.Background(), request) + assert.Error(t, err) + }) + + t.Run("DBError", func(t *testing.T) { + mockRepository := repositoryMocks.NewMockRepository() + mockRepository.SignalRepo().(*repositoryMocks.SignalRepoInterface).OnGetOrCreateMatch(mock.Anything, mock.Anything).Return(errors.New("foo")) + + signalManager := NewSignalManager(mockRepository, mockScope.NewTestScope()) + request := admin.SignalGetOrCreateRequest{ + Id: signalID, + Type: signalType, + } + + _, err := signalManager.GetOrCreateSignal(context.Background(), request) + assert.Error(t, err) + }) +} + +func TestListSignals(t *testing.T) { + signalModel, err := transformers.CreateSignalModel(signalID, signalType, nil) + assert.NoError(t, err) + + t.Run("Happy", func(t *testing.T) { + mockRepository := repositoryMocks.NewMockRepository() + mockRepository.SignalRepo().(*repositoryMocks.SignalRepoInterface). + OnListMatch(mock.Anything, mock.Anything).Return( + []models.Signal{signalModel}, + nil, + ) + + signalManager := NewSignalManager(mockRepository, mockScope.NewTestScope()) + request := admin.SignalListRequest{ + WorkflowExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + Limit: 20, + } + + response, err := signalManager.ListSignals(context.Background(), request) + assert.NoError(t, err) + + assert.True(t, proto.Equal( + &admin.SignalList{ + Signals: []*admin.Signal{ + &admin.Signal{ + Id: signalID, + Type: signalType, + }, + }, + }, + response, + )) + }) + + t.Run("ValidationError", func(t *testing.T) { + mockRepository := repositoryMocks.NewMockRepository() + signalManager := NewSignalManager(mockRepository, mockScope.NewTestScope()) + request := admin.SignalListRequest{ + WorkflowExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + } + + _, err := signalManager.ListSignals(context.Background(), request) + assert.Error(t, err) + }) + + t.Run("DBError", func(t *testing.T) { + mockRepository := repositoryMocks.NewMockRepository() + mockRepository.SignalRepo().(*repositoryMocks.SignalRepoInterface). + OnListMatch(mock.Anything, mock.Anything).Return(nil, errors.New("foo")) + + signalManager := NewSignalManager(mockRepository, mockScope.NewTestScope()) + request := admin.SignalListRequest{ + WorkflowExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + Limit: 20, + } + + _, err := signalManager.ListSignals(context.Background(), request) + assert.Error(t, err) + }) +} + +func TestSetSignal(t *testing.T) { + signalModel, err := transformers.CreateSignalModel(signalID, signalType, nil) + assert.NoError(t, err) + + t.Run("Happy", func(t *testing.T) { + mockRepository := repositoryMocks.NewMockRepository() + mockRepository.SignalRepo().(*repositoryMocks.SignalRepoInterface). + OnGetMatch(mock.Anything, mock.Anything, mock.Anything).Return(signalModel, nil) + mockRepository.SignalRepo().(*repositoryMocks.SignalRepoInterface). 
+ OnUpdateMatch(mock.Anything, mock.Anything, mock.Anything).Return(nil) + + signalManager := NewSignalManager(mockRepository, mockScope.NewTestScope()) + request := admin.SignalSetRequest{ + Id: signalID, + Value: signalValue, + } + + response, err := signalManager.SetSignal(context.Background(), request) + assert.NoError(t, err) + + assert.True(t, proto.Equal(&admin.SignalSetResponse{}, response)) + }) + + t.Run("ValidationError", func(t *testing.T) { + mockRepository := repositoryMocks.NewMockRepository() + signalManager := NewSignalManager(mockRepository, mockScope.NewTestScope()) + request := admin.SignalSetRequest{ + Value: signalValue, + } + + _, err := signalManager.SetSignal(context.Background(), request) + assert.Error(t, err) + }) + + t.Run("DBGetError", func(t *testing.T) { + mockRepository := repositoryMocks.NewMockRepository() + mockRepository.SignalRepo().(*repositoryMocks.SignalRepoInterface). + OnGetMatch(mock.Anything, mock.Anything).Return( + models.Signal{}, + errors.New("foo"), + ) + + signalManager := NewSignalManager(mockRepository, mockScope.NewTestScope()) + request := admin.SignalSetRequest{ + Id: signalID, + Value: signalValue, + } + + _, err := signalManager.SetSignal(context.Background(), request) + assert.Error(t, err) + }) + + t.Run("DBUpdateError", func(t *testing.T) { + mockRepository := repositoryMocks.NewMockRepository() + mockRepository.SignalRepo().(*repositoryMocks.SignalRepoInterface). + OnGetMatch(mock.Anything, mock.Anything).Return(signalModel, nil) + mockRepository.SignalRepo().(*repositoryMocks.SignalRepoInterface). + OnUpdateMatch(mock.Anything, mock.Anything, mock.Anything).Return(errors.New("foo")) + + signalManager := NewSignalManager(mockRepository, mockScope.NewTestScope()) + request := admin.SignalSetRequest{ + Id: signalID, + Value: signalValue, + } + + _, err := signalManager.SetSignal(context.Background(), request) + assert.Error(t, err) + }) +} diff --git a/pkg/manager/impl/task_execution_manager.go b/pkg/manager/impl/task_execution_manager.go index db88b77c8..60825309b 100644 --- a/pkg/manager/impl/task_execution_manager.go +++ b/pkg/manager/impl/task_execution_manager.go @@ -233,7 +233,7 @@ func (m *TaskExecutionManager) GetTaskExecution( if err != nil { return nil, err } - taskExecution, err := transformers.FromTaskExecutionModel(*taskExecutionModel) + taskExecution, err := transformers.FromTaskExecutionModel(*taskExecutionModel, transformers.DefaultExecutionTransformerOptions) if err != nil { logger.Debugf(ctx, "Failed to transform task execution model [%+v] to proto: %v", request.Id, err) return nil, err @@ -284,7 +284,8 @@ func (m *TaskExecutionManager) ListTaskExecutions( return nil, err } - taskExecutionList, err := transformers.FromTaskExecutionModels(output.TaskExecutions) + // Use default transformer options so that error messages shown for task execution attempts in the console sidebar show the full error stack trace. 
+ taskExecutionList, err := transformers.FromTaskExecutionModels(output.TaskExecutions, transformers.DefaultExecutionTransformerOptions) if err != nil { logger.Debugf(ctx, "failed to transform task execution models for request [%+v] with err: %v", request, err) return nil, err diff --git a/pkg/manager/impl/task_execution_manager_test.go b/pkg/manager/impl/task_execution_manager_test.go index ec3bb434a..cc59012bb 100644 --- a/pkg/manager/impl/task_execution_manager_test.go +++ b/pkg/manager/impl/task_execution_manager_test.go @@ -57,7 +57,9 @@ var taskEventRequest = admin.TaskExecutionEventRequest{ OccurredAt: sampleTaskEventOccurredAt, Phase: core.TaskExecution_RUNNING, RetryAttempt: uint32(1), - InputUri: "input uri", + InputValue: &event.TaskExecutionEvent_InputUri{ + InputUri: "input uri", + }, }, } @@ -181,6 +183,15 @@ func TestCreateTaskEvent(t *testing.T) { assert.True(t, createTaskCalled) assert.Nil(t, err) assert.NotNil(t, resp) + + repository.TaskExecutionRepo().(*repositoryMocks.MockTaskExecutionRepo).SetCreateCallback( + func(ctx context.Context, input models.TaskExecution) error { + return errors.New("failed to insert record into task table") + }) + taskExecManager = NewTaskExecutionManager(repository, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockTaskExecutionRemoteURL, nil, nil) + resp, err = taskExecManager.CreateTaskExecutionEvent(context.Background(), taskEventRequest) + assert.NotNil(t, err) + assert.Nil(t, resp) } func TestCreateTaskEvent_Update(t *testing.T) { @@ -607,8 +618,11 @@ func TestListTaskExecutions(t *testing.T) { repository := repositoryMocks.NewMockRepository() expectedLogs := []*core.TaskLog{{Uri: "test-log1.txt"}} - expectedOutputResult := &admin.TaskExecutionClosure_OutputUri{ - OutputUri: "test-output.pb", + extraLongErrMsg := string(make([]byte, 2*100)) + expectedOutputResult := &admin.TaskExecutionClosure_Error{ + Error: &core.ExecutionError{ + Message: extraLongErrMsg, + }, } expectedClosure := &admin.TaskExecutionClosure{ StartedAt: sampleTaskEventOccurredAt, diff --git a/pkg/manager/impl/task_manager.go b/pkg/manager/impl/task_manager.go index 57843149a..b4346fcd9 100644 --- a/pkg/manager/impl/task_manager.go +++ b/pkg/manager/impl/task_manager.go @@ -20,6 +20,7 @@ import ( "github.com/flyteorg/flyteadmin/pkg/common" "github.com/flyteorg/flyteadmin/pkg/errors" + "github.com/flyteorg/flyteadmin/pkg/manager/impl/resources" "github.com/flyteorg/flyteadmin/pkg/manager/impl/util" "github.com/flyteorg/flyteadmin/pkg/manager/impl/validation" "github.com/flyteorg/flyteadmin/pkg/manager/interfaces" @@ -38,10 +39,11 @@ type taskMetrics struct { } type TaskManager struct { - db repoInterfaces.Repository - config runtimeInterfaces.Configuration - compiler workflowengine.Compiler - metrics taskMetrics + db repoInterfaces.Repository + config runtimeInterfaces.Configuration + compiler workflowengine.Compiler + metrics taskMetrics + resourceManager interfaces.ResourceInterface } func getTaskContext(ctx context.Context, identifier *core.Identifier) context.Context { @@ -62,7 +64,8 @@ func setDefaults(request admin.TaskCreateRequest) (admin.TaskCreateRequest, erro func (t *TaskManager) CreateTask( ctx context.Context, request admin.TaskCreateRequest) (*admin.TaskCreateResponse, error) { - if err := validation.ValidateTask(ctx, request, t.db, t.config.TaskResourceConfiguration(), + platformTaskResources := util.GetTaskResources(ctx, request.Id, t.resourceManager, t.config.TaskResourceConfiguration()) 
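+ // platformTaskResources resolves any matchable TASK_RESOURCE overrides for this id, falling back to the configured platform defaults and limits (see util.GetTaskResources); it replaces the static task resource configuration previously passed to ValidateTask.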
+ if err := validation.ValidateTask(ctx, request, t.db, platformTaskResources, t.config.WhitelistConfiguration(), t.config.ApplicationConfiguration()); err != nil { logger.Debugf(ctx, "Task [%+v] failed validation with err: %v", request.Id, err) return nil, err @@ -107,7 +110,17 @@ func (t *TaskManager) CreateTask( "Failed to transform task model [%+v] with err: %v", finalizedRequest, err) return nil, err } - err = t.db.TaskRepo().Create(ctx, taskModel) + + descriptionModel, err := transformers.CreateDescriptionEntityModel(request.Spec.Description, *request.Id) + if err != nil { + logger.Errorf(ctx, + "Failed to transform description model [%+v] with err: %v", request.Spec.Description, err) + return nil, err + } + if descriptionModel != nil { + taskModel.ShortDescription = descriptionModel.ShortDescription + } + err = t.db.TaskRepo().Create(ctx, taskModel, descriptionModel) if err != nil { logger.Debugf(ctx, "Failed to create task model with id [%+v] with err %v", request.Id, err) return nil, err @@ -120,6 +133,7 @@ func (t *TaskManager) CreateTask( contextWithRuntimeMeta, common.RuntimeVersionKey, finalizedRequest.Spec.Template.Metadata.Runtime.Version) t.metrics.Registered.Inc(contextWithRuntimeMeta) } + return &admin.TaskCreateResponse{}, nil } @@ -258,10 +272,12 @@ func NewTaskManager( ClosureSizeBytes: scope.MustNewSummary("closure_size_bytes", "size in bytes of serialized task closure"), Registered: labeled.NewCounter("num_registered", "count of registered tasks", scope), } + resourceManager := resources.NewResourceManager(db, config.ApplicationConfiguration()) return &TaskManager{ - db: db, - config: config, - compiler: compiler, - metrics: metrics, + db: db, + config: config, + compiler: compiler, + metrics: metrics, + resourceManager: resourceManager, } } diff --git a/pkg/manager/impl/task_manager_test.go b/pkg/manager/impl/task_manager_test.go index a7d78bf9c..8b8a38e76 100644 --- a/pkg/manager/impl/task_manager_test.go +++ b/pkg/manager/impl/task_manager_test.go @@ -73,22 +73,30 @@ func TestCreateTask(t *testing.T) { return models.Task{}, errors.New("foo") }) var createCalled bool - mockRepository.TaskRepo().(*repositoryMocks.MockTaskRepo).SetCreateCallback(func(input models.Task) error { + mockRepository.TaskRepo().(*repositoryMocks.MockTaskRepo).SetCreateCallback(func(input models.Task, descriptionEntity *models.DescriptionEntity) error { assert.Equal(t, []byte{ 0xbf, 0x79, 0x61, 0x1c, 0xf5, 0xc1, 0xfb, 0x4c, 0xf8, 0xf4, 0xc4, 0x53, 0x5f, 0x8f, 0x73, 0xe2, 0x26, 0x5a, 0x18, 0x4a, 0xb7, 0x66, 0x98, 0x3c, 0xab, 0x2, 0x6c, 0x9, 0x9b, 0x90, 0xec, 0x8f}, input.Digest) createCalled = true return nil }) + mockRepository.DescriptionEntityRepo().(*repositoryMocks.MockDescriptionEntityRepo).SetGetCallback( + func(input interfaces.GetDescriptionEntityInput) (models.DescriptionEntity, error) { + return models.DescriptionEntity{}, adminErrors.NewFlyteAdminErrorf(codes.NotFound, "NotFound") + }) taskManager := NewTaskManager(mockRepository, getMockConfigForTaskTest(), getMockTaskCompiler(), mockScope.NewTestScope()) request := testutils.GetValidTaskRequest() response, err := taskManager.CreateTask(context.Background(), request) assert.NoError(t, err) - expectedResponse := &admin.TaskCreateResponse{} - assert.Equal(t, expectedResponse, response) + assert.Equal(t, &admin.TaskCreateResponse{}, response) assert.True(t, createCalled) + + request.Spec.Description = nil + response, err = taskManager.CreateTask(context.Background(), request) + assert.NoError(t, err) + assert.NotNil(t, response) } 
func TestCreateTask_ValidationError(t *testing.T) { @@ -125,7 +133,7 @@ func TestCreateTask_DatabaseError(t *testing.T) { return models.Task{}, errors.New("foo") }) expectedErr := errors.New("expected error") - taskCreateFunc := func(input models.Task) error { + taskCreateFunc := func(input models.Task, descriptionEntity *models.DescriptionEntity) error { return expectedErr } diff --git a/pkg/manager/impl/testutils/mock_requests.go b/pkg/manager/impl/testutils/mock_requests.go index 9346b6996..2a8d47dd9 100644 --- a/pkg/manager/impl/testutils/mock_requests.go +++ b/pkg/manager/impl/testutils/mock_requests.go @@ -41,6 +41,7 @@ func GetValidTaskRequest() admin.TaskCreateRequest { }, }, }, + Description: &admin.DescriptionEntity{ShortDescription: "hello"}, }, } } @@ -119,6 +120,7 @@ func GetWorkflowRequest() admin.WorkflowCreateRequest { }, }, }, + Description: &admin.DescriptionEntity{ShortDescription: "hello"}, }, } } diff --git a/pkg/manager/impl/util/filters.go b/pkg/manager/impl/util/filters.go index a21404960..e52bfb8b1 100644 --- a/pkg/manager/impl/util/filters.go +++ b/pkg/manager/impl/util/filters.go @@ -61,6 +61,7 @@ var filterFieldEntityPrefix = map[string]common.Entity{ "entities": common.NamedEntity, "named_entity_metadata": common.NamedEntityMetadata, "project": common.Project, + "signal": common.Signal, } func parseField(field string, primaryEntity common.Entity) (common.Entity, string) { diff --git a/pkg/manager/impl/util/resources.go b/pkg/manager/impl/util/resources.go new file mode 100644 index 000000000..f09695723 --- /dev/null +++ b/pkg/manager/impl/util/resources.go @@ -0,0 +1,120 @@ +package util + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyteadmin/pkg/manager/interfaces" + runtimeInterfaces "github.com/flyteorg/flyteadmin/pkg/runtime/interfaces" + workflowengineInterfaces "github.com/flyteorg/flyteadmin/pkg/workflowengine/interfaces" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytestdlib/logger" + "k8s.io/apimachinery/pkg/api/resource" +) + +// parseQuantityNoError parses the k8s defined resource quantity gracefully masking errors. +func parseQuantityNoError(ctx context.Context, ownerID, name, value string) resource.Quantity { + q, err := resource.ParseQuantity(value) + if err != nil { + logger.Infof(ctx, "Failed to parse owner's [%s] resource [%s]'s value [%s] with err: %v", ownerID, name, value, err) + } + + return q +} + +// getTaskResourcesAsSet converts a list of flyteidl `ResourceEntry` messages into a singular `TaskResourceSet`. 
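+// Unknown resource names are skipped, and values that fail to parse are logged and left as zero quantities via parseQuantityNoError.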
+func getTaskResourcesAsSet(ctx context.Context, identifier *core.Identifier, + resourceEntries []*core.Resources_ResourceEntry, resourceName string) runtimeInterfaces.TaskResourceSet { + + result := runtimeInterfaces.TaskResourceSet{} + for _, entry := range resourceEntries { + switch entry.Name { + case core.Resources_CPU: + result.CPU = parseQuantityNoError(ctx, identifier.String(), fmt.Sprintf("%v.cpu", resourceName), entry.Value) + case core.Resources_MEMORY: + result.Memory = parseQuantityNoError(ctx, identifier.String(), fmt.Sprintf("%v.memory", resourceName), entry.Value) + case core.Resources_EPHEMERAL_STORAGE: + result.EphemeralStorage = parseQuantityNoError(ctx, identifier.String(), + fmt.Sprintf("%v.ephemeral storage", resourceName), entry.Value) + case core.Resources_GPU: + result.GPU = parseQuantityNoError(ctx, identifier.String(), "gpu", entry.Value) + } + } + + return result +} + +// GetCompleteTaskResourceRequirements parses the resource requests and limits from the `TaskTemplate` Container. +func GetCompleteTaskResourceRequirements(ctx context.Context, identifier *core.Identifier, task *core.CompiledTask) workflowengineInterfaces.TaskResources { + return workflowengineInterfaces.TaskResources{ + Defaults: getTaskResourcesAsSet(ctx, identifier, task.GetTemplate().GetContainer().Resources.Requests, "requests"), + Limits: getTaskResourcesAsSet(ctx, identifier, task.GetTemplate().GetContainer().Resources.Limits, "limits"), + } +} + +// fromAdminProtoTaskResourceSpec parses the flyteidl `TaskResourceSpec` message into a `TaskResourceSet`. +func fromAdminProtoTaskResourceSpec(ctx context.Context, spec *admin.TaskResourceSpec) runtimeInterfaces.TaskResourceSet { + result := runtimeInterfaces.TaskResourceSet{} + if len(spec.Cpu) > 0 { + result.CPU = parseQuantityNoError(ctx, "project", "cpu", spec.Cpu) + } + + if len(spec.Memory) > 0 { + result.Memory = parseQuantityNoError(ctx, "project", "memory", spec.Memory) + } + + if len(spec.Storage) > 0 { + result.Storage = parseQuantityNoError(ctx, "project", "storage", spec.Storage) + } + + if len(spec.EphemeralStorage) > 0 { + result.EphemeralStorage = parseQuantityNoError(ctx, "project", "ephemeral storage", spec.EphemeralStorage) + } + + if len(spec.Gpu) > 0 { + result.GPU = parseQuantityNoError(ctx, "project", "gpu", spec.Gpu) + } + + return result +} + +// GetTaskResources returns the most specific default and limit task resources for the specified id. This first checks +// if there is a matchable resource(s) defined, and uses the highest priority one, otherwise it falls back to using the +// flyteadmin default configured values. 
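+// If the resource manager lookup errors or returns no TaskResourceAttributes, both Defaults and Limits fall back to the supplied TaskResourceConfiguration.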
+func GetTaskResources(ctx context.Context, id *core.Identifier, resourceManager interfaces.ResourceInterface, + taskResourceConfig runtimeInterfaces.TaskResourceConfiguration) workflowengineInterfaces.TaskResources { + + request := interfaces.ResourceRequest{ + ResourceType: admin.MatchableResource_TASK_RESOURCE, + } + if id != nil && len(id.Project) > 0 { + request.Project = id.Project + } + if id != nil && len(id.Domain) > 0 { + request.Domain = id.Domain + } + if id != nil && id.ResourceType == core.ResourceType_WORKFLOW && len(id.Name) > 0 { + request.Workflow = id.Name + } + + resource, err := resourceManager.GetResource(ctx, request) + if err != nil { + logger.Warningf(ctx, "Failed to fetch override values when assigning task resource default values for [%+v]: %v", + id, err) + } + + logger.Debugf(ctx, "Assigning task requested resources for [%+v]", id) + var taskResourceAttributes = workflowengineInterfaces.TaskResources{} + if resource != nil && resource.Attributes != nil && resource.Attributes.GetTaskResourceAttributes() != nil { + taskResourceAttributes.Defaults = fromAdminProtoTaskResourceSpec(ctx, resource.Attributes.GetTaskResourceAttributes().Defaults) + taskResourceAttributes.Limits = fromAdminProtoTaskResourceSpec(ctx, resource.Attributes.GetTaskResourceAttributes().Limits) + } else { + taskResourceAttributes = workflowengineInterfaces.TaskResources{ + Defaults: taskResourceConfig.GetDefaults(), + Limits: taskResourceConfig.GetLimits(), + } + } + + return taskResourceAttributes +} diff --git a/pkg/manager/impl/util/resources_test.go b/pkg/manager/impl/util/resources_test.go new file mode 100644 index 000000000..f4180c4b5 --- /dev/null +++ b/pkg/manager/impl/util/resources_test.go @@ -0,0 +1,229 @@ +package util + +import ( + "context" + "testing" + + managerInterfaces "github.com/flyteorg/flyteadmin/pkg/manager/interfaces" + managerMocks "github.com/flyteorg/flyteadmin/pkg/manager/mocks" + runtimeInterfaces "github.com/flyteorg/flyteadmin/pkg/runtime/interfaces" + runtimeMocks "github.com/flyteorg/flyteadmin/pkg/runtime/mocks" + workflowengineInterfaces "github.com/flyteorg/flyteadmin/pkg/workflowengine/interfaces" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/stretchr/testify/assert" + + "k8s.io/apimachinery/pkg/api/resource" +) + +var workflowIdentifier = core.Identifier{ + ResourceType: core.ResourceType_WORKFLOW, + Project: "project", + Domain: "domain", + Name: "name", + Version: "version", +} + +func TestGetTaskResources(t *testing.T) { + taskConfig := runtimeMocks.MockTaskResourceConfiguration{} + taskConfig.Defaults = runtimeInterfaces.TaskResourceSet{ + CPU: resource.MustParse("200m"), + GPU: resource.MustParse("8"), + Memory: resource.MustParse("200Gi"), + EphemeralStorage: resource.MustParse("500Mi"), + Storage: resource.MustParse("400Mi"), + } + taskConfig.Limits = runtimeInterfaces.TaskResourceSet{ + CPU: resource.MustParse("300m"), + GPU: resource.MustParse("8"), + Memory: resource.MustParse("500Gi"), + EphemeralStorage: resource.MustParse("501Mi"), + Storage: resource.MustParse("450Mi"), + } + + t.Run("use runtime application values", func(t *testing.T) { + resourceManager := managerMocks.MockResourceManager{} + resourceManager.GetResourceFunc = func(ctx context.Context, + request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { + assert.EqualValues(t, request, managerInterfaces.ResourceRequest{ + Project: workflowIdentifier.Project, + 
Domain: workflowIdentifier.Domain, + Workflow: workflowIdentifier.Name, + ResourceType: admin.MatchableResource_TASK_RESOURCE, + }) + return &managerInterfaces.ResourceResponse{}, nil + } + + taskResourceAttrs := GetTaskResources(context.TODO(), &workflowIdentifier, &resourceManager, &taskConfig) + assert.EqualValues(t, taskResourceAttrs, workflowengineInterfaces.TaskResources{ + Defaults: runtimeInterfaces.TaskResourceSet{ + CPU: resource.MustParse("200m"), + GPU: resource.MustParse("8"), + Memory: resource.MustParse("200Gi"), + EphemeralStorage: resource.MustParse("500Mi"), + Storage: resource.MustParse("400Mi"), + }, + Limits: runtimeInterfaces.TaskResourceSet{ + CPU: resource.MustParse("300m"), + GPU: resource.MustParse("8"), + Memory: resource.MustParse("500Gi"), + EphemeralStorage: resource.MustParse("501Mi"), + Storage: resource.MustParse("450Mi"), + }, + }) + }) + t.Run("use specific overrides", func(t *testing.T) { + resourceManager := managerMocks.MockResourceManager{} + resourceManager.GetResourceFunc = func(ctx context.Context, + request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { + assert.EqualValues(t, request, managerInterfaces.ResourceRequest{ + Project: workflowIdentifier.Project, + Domain: workflowIdentifier.Domain, + Workflow: workflowIdentifier.Name, + ResourceType: admin.MatchableResource_TASK_RESOURCE, + }) + return &managerInterfaces.ResourceResponse{ + Attributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_TaskResourceAttributes{ + TaskResourceAttributes: &admin.TaskResourceAttributes{ + Defaults: &admin.TaskResourceSpec{ + Cpu: "1200m", + Gpu: "18", + Memory: "1200Gi", + EphemeralStorage: "1500Mi", + Storage: "1400Mi", + }, + Limits: &admin.TaskResourceSpec{ + Cpu: "300m", + Gpu: "8", + Memory: "500Gi", + EphemeralStorage: "501Mi", + Storage: "450Mi", + }, + }, + }, + }, + }, nil + } + taskResourceAttrs := GetTaskResources(context.TODO(), &workflowIdentifier, &resourceManager, &taskConfig) + assert.EqualValues(t, taskResourceAttrs, workflowengineInterfaces.TaskResources{ + Defaults: runtimeInterfaces.TaskResourceSet{ + CPU: resource.MustParse("1200m"), + GPU: resource.MustParse("18"), + Memory: resource.MustParse("1200Gi"), + EphemeralStorage: resource.MustParse("1500Mi"), + Storage: resource.MustParse("1400Mi"), + }, + Limits: runtimeInterfaces.TaskResourceSet{ + CPU: resource.MustParse("300m"), + GPU: resource.MustParse("8"), + Memory: resource.MustParse("500Gi"), + EphemeralStorage: resource.MustParse("501Mi"), + Storage: resource.MustParse("450Mi"), + }, + }) + }) +} + +func TestFromAdminProtoTaskResourceSpec(t *testing.T) { + taskResourceSet := fromAdminProtoTaskResourceSpec(context.TODO(), &admin.TaskResourceSpec{ + Cpu: "1", + Memory: "100", + Storage: "200", + EphemeralStorage: "300", + Gpu: "2", + }) + assert.EqualValues(t, runtimeInterfaces.TaskResourceSet{ + CPU: resource.MustParse("1"), + Memory: resource.MustParse("100"), + Storage: resource.MustParse("200"), + EphemeralStorage: resource.MustParse("300"), + GPU: resource.MustParse("2"), + }, taskResourceSet) +} + +func TestGetTaskResourcesAsSet(t *testing.T) { + taskResources := getTaskResourcesAsSet(context.TODO(), &core.Identifier{}, []*core.Resources_ResourceEntry{ + { + Name: core.Resources_CPU, + Value: "100", + }, + { + Name: core.Resources_MEMORY, + Value: "200", + }, + { + Name: core.Resources_EPHEMERAL_STORAGE, + Value: "300", + }, + { + Name: core.Resources_GPU, + Value: "400", + }, + }, "request") + assert.True(t, 
taskResources.CPU.Equal(resource.MustParse("100"))) + assert.True(t, taskResources.Memory.Equal(resource.MustParse("200"))) + assert.True(t, taskResources.EphemeralStorage.Equal(resource.MustParse("300"))) + assert.True(t, taskResources.GPU.Equal(resource.MustParse("400"))) +} + +func TestGetCompleteTaskResourceRequirements(t *testing.T) { + taskResources := GetCompleteTaskResourceRequirements(context.TODO(), &core.Identifier{}, &core.CompiledTask{ + Template: &core.TaskTemplate{ + Target: &core.TaskTemplate_Container{ + Container: &core.Container{ + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + { + Name: core.Resources_CPU, + Value: "100", + }, + { + Name: core.Resources_MEMORY, + Value: "200", + }, + { + Name: core.Resources_EPHEMERAL_STORAGE, + Value: "300", + }, + { + Name: core.Resources_GPU, + Value: "400", + }, + }, + Limits: []*core.Resources_ResourceEntry{ + { + Name: core.Resources_CPU, + Value: "200", + }, + { + Name: core.Resources_MEMORY, + Value: "400", + }, + { + Name: core.Resources_EPHEMERAL_STORAGE, + Value: "600", + }, + { + Name: core.Resources_GPU, + Value: "800", + }, + }, + }, + }, + }, + }, + }) + + assert.True(t, taskResources.Defaults.CPU.Equal(resource.MustParse("100"))) + assert.True(t, taskResources.Defaults.Memory.Equal(resource.MustParse("200"))) + assert.True(t, taskResources.Defaults.EphemeralStorage.Equal(resource.MustParse("300"))) + assert.True(t, taskResources.Defaults.GPU.Equal(resource.MustParse("400"))) + + assert.True(t, taskResources.Limits.CPU.Equal(resource.MustParse("200"))) + assert.True(t, taskResources.Limits.Memory.Equal(resource.MustParse("400"))) + assert.True(t, taskResources.Limits.EphemeralStorage.Equal(resource.MustParse("600"))) + assert.True(t, taskResources.Limits.GPU.Equal(resource.MustParse("800"))) +} diff --git a/pkg/manager/impl/util/shared.go b/pkg/manager/impl/util/shared.go index 8bf449af8..bf9490473 100644 --- a/pkg/manager/impl/util/shared.go +++ b/pkg/manager/impl/util/shared.go @@ -138,6 +138,36 @@ func GetNamedEntity( return &metadata, nil } +func GetDescriptionEntityModel( + ctx context.Context, repo repoInterfaces.Repository, identifier core.Identifier) (models.DescriptionEntity, error) { + descriptionEntityModel, err := (repo).DescriptionEntityRepo().Get(ctx, repoInterfaces.GetDescriptionEntityInput{ + ResourceType: identifier.ResourceType, + Project: identifier.Project, + Domain: identifier.Domain, + Name: identifier.Name, + Version: identifier.Version, + }) + if err != nil { + return models.DescriptionEntity{}, err + } + return descriptionEntityModel, nil +} + +func GetDescriptionEntity( + ctx context.Context, repo repoInterfaces.Repository, identifier core.Identifier) (*admin.DescriptionEntity, error) { + descriptionEntityModel, err := GetDescriptionEntityModel(ctx, repo, identifier) + if err != nil { + logger.Errorf(ctx, "Failed to get description entity [%+v]: %v", identifier, err) + return nil, err + } + descriptionEntity, err := transformers.FromDescriptionEntityModel(descriptionEntityModel) + if err != nil { + logger.Errorf(ctx, "Failed to unmarshal description entity [%+v]: %v", descriptionEntityModel, err) + return nil, err + } + return descriptionEntity, nil +} + // Returns the set of filters necessary to query launch plan models to find the active version of a launch plan func GetActiveLaunchPlanVersionFilters(project, domain, name string) ([]common.InlineFilter, error) { projectFilter, err := common.NewSingleValueFilter(common.LaunchPlan, common.Equal, shared.Project, 
project) @@ -204,7 +234,6 @@ func GetNodeExecutionModel(ctx context.Context, repo repoInterfaces.Repository, func GetTaskModel(ctx context.Context, repo repoInterfaces.Repository, taskIdentifier *core.Identifier) ( *models.Task, error) { - taskModel, err := repo.TaskRepo().Get(ctx, repoInterfaces.Identifier{ Project: taskIdentifier.Project, Domain: taskIdentifier.Domain, @@ -256,7 +285,7 @@ func GetMatchableResource(ctx context.Context, resourceManager interfaces.Resour } // MergeIntoExecConfig into workflowExecConfig (higher priority) from spec (lower priority) and return the -// a new object with the merged changes. +// new object with the merged changes. // After settings project is done, can move this function back to execution manager. Currently shared with resource. func MergeIntoExecConfig(workflowExecConfig admin.WorkflowExecutionConfig, spec shared.WorkflowExecutionConfigInterface) admin.WorkflowExecutionConfig { if workflowExecConfig.GetMaxParallelism() == 0 && spec.GetMaxParallelism() > 0 { @@ -291,5 +320,10 @@ func MergeIntoExecConfig(workflowExecConfig admin.WorkflowExecutionConfig, spec if workflowExecConfig.GetInterruptible() == nil && spec.GetInterruptible() != nil { workflowExecConfig.Interruptible = spec.GetInterruptible() } + + if !workflowExecConfig.GetOverwriteCache() && spec.GetOverwriteCache() { + workflowExecConfig.OverwriteCache = spec.GetOverwriteCache() + } + return workflowExecConfig } diff --git a/pkg/manager/impl/util/shared_test.go b/pkg/manager/impl/util/shared_test.go index 56b658332..e77607af6 100644 --- a/pkg/manager/impl/util/shared_test.go +++ b/pkg/manager/impl/util/shared_test.go @@ -568,6 +568,89 @@ func TestGetMatchableResource(t *testing.T) { }) } +func TestGetDescriptionEntityModel(t *testing.T) { + repository := repositoryMocks.NewMockRepository() + t.Run("Get Description Entity model", func(t *testing.T) { + entity, err := GetDescriptionEntityModel(context.Background(), repository, + core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: project, + Domain: domain, + Name: name, + Version: version, + }) + assert.Nil(t, err) + assert.NotNil(t, entity) + assert.Equal(t, "hello world", entity.ShortDescription) + }) + + t.Run("Failed to get DescriptionEntity model", func(t *testing.T) { + getFunction := func(input interfaces.GetDescriptionEntityInput) (models.DescriptionEntity, error) { + return models.DescriptionEntity{}, flyteAdminErrors.NewFlyteAdminErrorf(codes.NotFound, "NotFound") + } + repository.DescriptionEntityRepo().(*repositoryMocks.MockDescriptionEntityRepo).SetGetCallback(getFunction) + entity, err := GetDescriptionEntityModel(context.Background(), repository, + core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: project, + Domain: domain, + Name: name, + Version: version, + }) + assert.Error(t, err) + assert.Equal(t, "", entity.Name) + }) +} + +func TestGetDescriptionEntity(t *testing.T) { + repository := repositoryMocks.NewMockRepository() + t.Run("Get Description Entity", func(t *testing.T) { + entity, err := GetDescriptionEntity(context.Background(), repository, + core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: project, + Domain: domain, + Name: name, + Version: version, + }) + assert.Nil(t, err) + assert.NotNil(t, entity) + assert.Equal(t, "hello world", entity.ShortDescription) + }) + + t.Run("Failed to get DescriptionEntity", func(t *testing.T) { + getFunction := func(input interfaces.GetDescriptionEntityInput) (models.DescriptionEntity, error) { + return 
models.DescriptionEntity{}, flyteAdminErrors.NewFlyteAdminErrorf(codes.NotFound, "NotFound") + } + repository.DescriptionEntityRepo().(*repositoryMocks.MockDescriptionEntityRepo).SetGetCallback(getFunction) + entity, err := GetDescriptionEntity(context.Background(), repository, + core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: project, + Domain: domain, + Name: name, + Version: version, + }) + assert.Error(t, err) + assert.Nil(t, entity) + + getFunction = func(input interfaces.GetDescriptionEntityInput) (models.DescriptionEntity, error) { + return models.DescriptionEntity{LongDescription: []byte("???")}, nil + } + repository.DescriptionEntityRepo().(*repositoryMocks.MockDescriptionEntityRepo).SetGetCallback(getFunction) + entity, err = GetDescriptionEntity(context.Background(), repository, + core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: project, + Domain: domain, + Name: name, + Version: version, + }) + assert.Error(t, err) + assert.Nil(t, entity) + }) +} + func TestMergeIntoExecConfig(t *testing.T) { var res admin.WorkflowExecutionConfig parameters := []struct { diff --git a/pkg/manager/impl/util/single_task_execution_test.go b/pkg/manager/impl/util/single_task_execution_test.go index a36513fec..25b1c59f4 100644 --- a/pkg/manager/impl/util/single_task_execution_test.go +++ b/pkg/manager/impl/util/single_task_execution_test.go @@ -72,7 +72,7 @@ func TestCreateOrGetWorkflowModel(t *testing.T) { repository := repositoryMocks.NewMockRepository() var getCalledCount = 0 var newlyCreatedWorkflow models.Workflow - workflowcreateFunc := func(input models.Workflow) error { + workflowcreateFunc := func(input models.Workflow, descriptionEntity *models.DescriptionEntity) error { newlyCreatedWorkflow = input return nil } diff --git a/pkg/manager/impl/validation/execution_validator.go b/pkg/manager/impl/validation/execution_validator.go index 8db798eb0..73c31e1cc 100644 --- a/pkg/manager/impl/validation/execution_validator.go +++ b/pkg/manager/impl/validation/execution_validator.go @@ -104,7 +104,7 @@ func CheckAndFetchInputsForExecution( } else { inputType := validators.LiteralTypeForLiteral(executionInputMap[name]) if !validators.AreTypesCastable(inputType, expectedInput.GetVar().GetType()) { - return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid %s input wrong type", name) + return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid %s input wrong type. Expected %s, but got %s", name, expectedInput.GetVar().GetType(), inputType) } } } diff --git a/pkg/manager/impl/validation/execution_validator_test.go b/pkg/manager/impl/validation/execution_validator_test.go index 0af0ebfde..3aac99451 100644 --- a/pkg/manager/impl/validation/execution_validator_test.go +++ b/pkg/manager/impl/validation/execution_validator_test.go @@ -99,7 +99,7 @@ func TestValidateExecInputsWrongType(t *testing.T) { lpRequest.Spec.FixedInputs, lpRequest.Spec.DefaultInputs, ) - assert.EqualError(t, err, "invalid foo input wrong type") + assert.EqualError(t, err, "invalid foo input wrong type. 
Expected simple:STRING , but got simple:INTEGER ") } func TestValidateExecInputsExtraInputs(t *testing.T) { diff --git a/pkg/manager/impl/validation/signal_validator.go b/pkg/manager/impl/validation/signal_validator.go new file mode 100644 index 000000000..11c5b335d --- /dev/null +++ b/pkg/manager/impl/validation/signal_validator.go @@ -0,0 +1,88 @@ +package validation + +import ( + "context" + + "github.com/flyteorg/flyteadmin/pkg/errors" + "github.com/flyteorg/flyteadmin/pkg/manager/impl/shared" + repositoryInterfaces "github.com/flyteorg/flyteadmin/pkg/repositories/interfaces" + "github.com/flyteorg/flyteadmin/pkg/repositories/transformers" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + + propellervalidators "github.com/flyteorg/flytepropeller/pkg/compiler/validators" + + "google.golang.org/grpc/codes" +) + +func ValidateSignalGetOrCreateRequest(ctx context.Context, request admin.SignalGetOrCreateRequest) error { + if request.Id == nil { + return shared.GetMissingArgumentError("id") + } + if err := ValidateSignalIdentifier(*request.Id); err != nil { + return err + } + if request.Type == nil { + return shared.GetMissingArgumentError("type") + } + + return nil +} + +func ValidateSignalIdentifier(identifier core.SignalIdentifier) error { + if identifier.ExecutionId == nil { + return shared.GetMissingArgumentError(shared.ExecutionID) + } + if identifier.SignalId == "" { + return shared.GetMissingArgumentError("signal_id") + } + + return ValidateWorkflowExecutionIdentifier(identifier.ExecutionId) +} + +func ValidateSignalListRequest(ctx context.Context, request admin.SignalListRequest) error { + if err := ValidateWorkflowExecutionIdentifier(request.WorkflowExecutionId); err != nil { + return shared.GetMissingArgumentError(shared.ExecutionID) + } + if err := ValidateLimit(request.Limit); err != nil { + return err + } + return nil +} + +func ValidateSignalSetRequest(ctx context.Context, db repositoryInterfaces.Repository, request admin.SignalSetRequest) error { + if request.Id == nil { + return shared.GetMissingArgumentError("id") + } + if err := ValidateSignalIdentifier(*request.Id); err != nil { + return err + } + if request.Value == nil { + return shared.GetMissingArgumentError("value") + } + + // validate that signal value matches type of existing signal + signalModel, err := transformers.CreateSignalModel(request.Id, nil, nil) + if err != nil { + return nil + } + lookupSignalModel, err := db.SignalRepo().Get(ctx, signalModel.SignalKey) + if err != nil { + return errors.NewFlyteAdminErrorf(codes.InvalidArgument, + "failed to validate that signal [%v] exists, err: [%+v]", + signalModel.SignalKey, err) + } + valueType := propellervalidators.LiteralTypeForLiteral(request.Value) + lookupSignal, err := transformers.FromSignalModel(lookupSignalModel) + if err != nil { + return err + } + if !propellervalidators.AreTypesCastable(lookupSignal.Type, valueType) { + return errors.NewFlyteAdminErrorf(codes.InvalidArgument, + "requested signal value [%v] is not castable to existing signal type [%v]", + request.Value, lookupSignalModel.Type) + } + + return nil +} diff --git a/pkg/manager/impl/validation/signal_validator_test.go b/pkg/manager/impl/validation/signal_validator_test.go new file mode 100644 index 000000000..331da688c --- /dev/null +++ b/pkg/manager/impl/validation/signal_validator_test.go @@ -0,0 +1,287 @@ +package validation + +import ( + "context" + "errors" + "testing" + + repositoryMocks 
"github.com/flyteorg/flyteadmin/pkg/repositories/mocks" + "github.com/flyteorg/flyteadmin/pkg/repositories/models" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/golang/protobuf/proto" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestValidateSignalGetOrCreateRequest(t *testing.T) { + ctx := context.TODO() + + t.Run("Happy", func(t *testing.T) { + request := admin.SignalGetOrCreateRequest{ + Id: &core.SignalIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + SignalId: "signal", + }, + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_BOOLEAN, + }, + }, + } + assert.NoError(t, ValidateSignalGetOrCreateRequest(ctx, request)) + }) + + t.Run("MissingSignalIdentifier", func(t *testing.T) { + request := admin.SignalGetOrCreateRequest{ + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_BOOLEAN, + }, + }, + } + assert.EqualError(t, ValidateSignalGetOrCreateRequest(ctx, request), "missing id") + }) + + t.Run("InvalidSignalIdentifier", func(t *testing.T) { + request := admin.SignalGetOrCreateRequest{ + Id: &core.SignalIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + }, + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_BOOLEAN, + }, + }, + } + assert.EqualError(t, ValidateSignalGetOrCreateRequest(ctx, request), "missing signal_id") + }) + + t.Run("MissingExecutionIdentifier", func(t *testing.T) { + request := admin.SignalGetOrCreateRequest{ + Id: &core.SignalIdentifier{ + SignalId: "signal", + }, + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_BOOLEAN, + }, + }, + } + assert.EqualError(t, ValidateSignalGetOrCreateRequest(ctx, request), "missing execution_id") + }) + + t.Run("InvalidExecutionIdentifier", func(t *testing.T) { + request := admin.SignalGetOrCreateRequest{ + Id: &core.SignalIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Domain: "domain", + Name: "name", + }, + SignalId: "signal", + }, + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_BOOLEAN, + }, + }, + } + assert.EqualError(t, ValidateSignalGetOrCreateRequest(ctx, request), "missing project") + }) + + t.Run("MissingType", func(t *testing.T) { + request := admin.SignalGetOrCreateRequest{ + Id: &core.SignalIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + SignalId: "signal", + }, + } + assert.EqualError(t, ValidateSignalGetOrCreateRequest(ctx, request), "missing type") + }) +} + +func TestValidateSignalListrequest(t *testing.T) { + ctx := context.TODO() + + t.Run("Happy", func(t *testing.T) { + request := admin.SignalListRequest{ + WorkflowExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + Limit: 20, + } + assert.NoError(t, ValidateSignalListRequest(ctx, request)) + }) + + t.Run("MissingWorkflowExecutionIdentifier", func(t *testing.T) { + request := admin.SignalListRequest{ + Limit: 20, + } + assert.EqualError(t, ValidateSignalListRequest(ctx, request), "missing execution_id") + }) + + t.Run("MissingLimit", func(t *testing.T) { + request := admin.SignalListRequest{ + WorkflowExecutionId: &core.WorkflowExecutionIdentifier{ + 
Project: "project", + Domain: "domain", + Name: "name", + }, + } + assert.EqualError(t, ValidateSignalListRequest(ctx, request), "invalid value for limit") + }) +} + +func TestValidateSignalUpdateRequest(t *testing.T) { + ctx := context.TODO() + + booleanType := &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_BOOLEAN, + }, + } + typeBytes, _ := proto.Marshal(booleanType) + + repo := repositoryMocks.NewMockRepository() + repo.SignalRepo().(*repositoryMocks.SignalRepoInterface). + OnGetMatch(mock.Anything, mock.Anything).Return( + models.Signal{ + Type: typeBytes, + }, + nil, + ) + + t.Run("Happy", func(t *testing.T) { + request := admin.SignalSetRequest{ + Id: &core.SignalIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + SignalId: "signal", + }, + Value: &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Boolean{ + Boolean: false, + }, + }, + }, + }, + }, + }, + } + assert.NoError(t, ValidateSignalSetRequest(ctx, repo, request)) + }) + + t.Run("MissingValue", func(t *testing.T) { + request := admin.SignalSetRequest{ + Id: &core.SignalIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + SignalId: "signal", + }, + } + assert.EqualError(t, ValidateSignalSetRequest(ctx, repo, request), "missing value") + }) + + t.Run("MissingSignal", func(t *testing.T) { + repo := repositoryMocks.NewMockRepository() + repo.SignalRepo().(*repositoryMocks.SignalRepoInterface). + OnGetMatch(mock.Anything, mock.Anything).Return(models.Signal{}, errors.New("foo")) + + request := admin.SignalSetRequest{ + Id: &core.SignalIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + SignalId: "signal", + }, + Value: &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Boolean{ + Boolean: false, + }, + }, + }, + }, + }, + }, + } + assert.EqualError(t, ValidateSignalSetRequest(ctx, repo, request), + "failed to validate that signal [{{project domain name} signal}] exists, err: [foo]") + }) + + t.Run("InvalidType", func(t *testing.T) { + integerType := &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + } + typeBytes, _ := proto.Marshal(integerType) + + repo := repositoryMocks.NewMockRepository() + repo.SignalRepo().(*repositoryMocks.SignalRepoInterface). 
+ OnGetMatch(mock.Anything, mock.Anything).Return( + models.Signal{ + Type: typeBytes, + }, + nil, + ) + + request := admin.SignalSetRequest{ + Id: &core.SignalIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + SignalId: "signal", + }, + Value: &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Boolean{ + Boolean: false, + }, + }, + }, + }, + }, + }, + } + assert.EqualError(t, ValidateSignalSetRequest(ctx, repo, request), + "requested signal value [scalar: > ] is not castable to existing signal type [[8 1]]") + }) +} diff --git a/pkg/manager/impl/validation/task_validator.go b/pkg/manager/impl/validation/task_validator.go index c1b440b83..c8625ec4b 100644 --- a/pkg/manager/impl/validation/task_validator.go +++ b/pkg/manager/impl/validation/task_validator.go @@ -13,6 +13,7 @@ import ( "github.com/flyteorg/flyteadmin/pkg/manager/impl/shared" runtime "github.com/flyteorg/flyteadmin/pkg/runtime/interfaces" runtimeInterfaces "github.com/flyteorg/flyteadmin/pkg/runtime/interfaces" + workflowengineInterfaces "github.com/flyteorg/flyteadmin/pkg/workflowengine/interfaces" "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flytestdlib/logger" @@ -25,7 +26,7 @@ import ( var whitelistedTaskErr = errors.NewFlyteAdminErrorf(codes.InvalidArgument, "task type must be whitelisted before use") // This is called for a task with a non-nil container. -func validateContainer(task core.TaskTemplate, taskConfig runtime.TaskResourceConfiguration) error { +func validateContainer(task core.TaskTemplate, platformTaskResources workflowengineInterfaces.TaskResources) error { if err := ValidateEmptyStringField(task.GetContainer().Image, shared.Image); err != nil { return err } @@ -33,7 +34,7 @@ func validateContainer(task core.TaskTemplate, taskConfig runtime.TaskResourceCo if task.GetContainer().Resources == nil { return nil } - if err := validateTaskResources(task.Id, taskConfig.GetLimits(), task.GetContainer().Resources.Requests, + if err := validateTaskResources(task.Id, platformTaskResources.Limits, task.GetContainer().Resources.Requests, task.GetContainer().Resources.Limits); err != nil { logger.Debugf(context.Background(), "encountered errors validating task resources for [%+v]: %v", task.Id, err) @@ -43,7 +44,7 @@ func validateContainer(task core.TaskTemplate, taskConfig runtime.TaskResourceCo } // This is called for a task with a non-nil k8s pod. 
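// Editor's note, not part of the patch: the task_validator.go hunks change validateContainer,
// validateK8sPod, validateTaskTemplate and ValidateTask to accept a resolved
// workflowengineInterfaces.TaskResources value instead of a runtime.TaskResourceConfiguration
// provider, so platform limits are looked up once by the caller and passed as plain data.
// A hedged sketch of the new call shape, using only types that appear in this diff:
//
//	platformResources := workflowengineInterfaces.TaskResources{
//		Limits: runtimeInterfaces.TaskResourceSet{
//			CPU:    resource.MustParse("200m"),
//			Memory: resource.MustParse("500Mi"),
//		},
//	}
//	if err := validateContainer(task, platformResources); err != nil {
//		// handle validation failure
//	}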
-func validateK8sPod(task core.TaskTemplate, taskConfig runtime.TaskResourceConfiguration) error { +func validateK8sPod(task core.TaskTemplate, platformTaskResources workflowengineInterfaces.TaskResources) error { if task.GetK8SPod().PodSpec == nil { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid TaskSpecification, pod tasks should specify their target as a K8sPod with a defined pod spec") @@ -54,7 +55,7 @@ func validateK8sPod(task core.TaskTemplate, taskConfig runtime.TaskResourceConfi task.GetK8SPod().PodSpec, err) return err } - platformTaskResourceLimits := taskResourceSetToMap(taskConfig.GetLimits()) + platformTaskResourceLimits := taskResourceSetToMap(platformTaskResources.Limits) for _, container := range podSpec.Containers { err := validateResource(task.Id, resourceListToQuantity(container.Resources.Requests), resourceListToQuantity(container.Resources.Limits), platformTaskResourceLimits) @@ -76,7 +77,7 @@ func validateRuntimeMetadata(metadata core.RuntimeMetadata) error { } func validateTaskTemplate(taskID core.Identifier, task core.TaskTemplate, - taskConfig runtime.TaskResourceConfiguration, whitelistConfig runtime.WhitelistConfiguration) error { + platformTaskResources workflowengineInterfaces.TaskResources, whitelistConfig runtime.WhitelistConfiguration) error { if err := ValidateEmptyStringField(task.Type, shared.Type); err != nil { return err @@ -98,17 +99,17 @@ func validateTaskTemplate(taskID core.Identifier, task core.TaskTemplate, } if task.GetContainer() != nil { - return validateContainer(task, taskConfig) + return validateContainer(task, platformTaskResources) } if task.GetK8SPod() != nil { - return validateK8sPod(task, taskConfig) + return validateK8sPod(task, platformTaskResources) } return nil } func ValidateTask( ctx context.Context, request admin.TaskCreateRequest, db repositoryInterfaces.Repository, - taskConfig runtime.TaskResourceConfiguration, whitelistConfig runtime.WhitelistConfiguration, + platformTaskResources workflowengineInterfaces.TaskResources, whitelistConfig runtime.WhitelistConfiguration, applicationConfig runtime.ApplicationConfiguration) error { if err := ValidateIdentifier(request.Id, common.Task); err != nil { return err @@ -119,7 +120,7 @@ func ValidateTask( if request.Spec == nil || request.Spec.Template == nil { return shared.GetMissingArgumentError(shared.Spec) } - return validateTaskTemplate(*request.Id, *request.Spec.Template, taskConfig, whitelistConfig) + return validateTaskTemplate(*request.Id, *request.Spec.Template, platformTaskResources, whitelistConfig) } func taskResourceSetToMap( diff --git a/pkg/manager/impl/validation/task_validator_test.go b/pkg/manager/impl/validation/task_validator_test.go index c221fb5ea..78ec0309c 100644 --- a/pkg/manager/impl/validation/task_validator_test.go +++ b/pkg/manager/impl/validation/task_validator_test.go @@ -6,28 +6,28 @@ import ( "errors" "testing" - "google.golang.org/protobuf/types/known/structpb" - - corev1 "k8s.io/api/core/v1" - - "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" - "k8s.io/apimachinery/pkg/api/resource" - "github.com/flyteorg/flyteadmin/pkg/manager/impl/testutils" runtimeInterfaces "github.com/flyteorg/flyteadmin/pkg/runtime/interfaces" runtimeMocks "github.com/flyteorg/flyteadmin/pkg/runtime/mocks" + workflowengineInterfaces "github.com/flyteorg/flyteadmin/pkg/workflowengine/interfaces" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + "github.com/stretchr/testify/assert" + + "google.golang.org/protobuf/types/known/structpb" + + corev1 
"k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" ) -func getMockTaskConfigProvider() runtimeInterfaces.TaskResourceConfiguration { - var taskConfig = runtimeMocks.MockTaskResourceConfiguration{} - taskConfig.Limits = runtimeInterfaces.TaskResourceSet{ - Memory: resource.MustParse("500Mi"), - CPU: resource.MustParse("200m"), - GPU: resource.MustParse("8"), +func getMockTaskResources() workflowengineInterfaces.TaskResources { + return workflowengineInterfaces.TaskResources{ + Limits: runtimeInterfaces.TaskResourceSet{ + Memory: resource.MustParse("500Mi"), + CPU: resource.MustParse("200m"), + GPU: resource.MustParse("8"), + }, } - - return &taskConfig } var mockWhitelistConfigProvider = runtimeMocks.NewMockWhitelistConfiguration() @@ -47,26 +47,26 @@ func TestValidateTask(t *testing.T) { } request.Spec.Template.GetContainer().Resources = &core.Resources{Requests: resources} err := ValidateTask(context.Background(), request, testutils.GetRepoWithDefaultProject(), - getMockTaskConfigProvider(), mockWhitelistConfigProvider, taskApplicationConfigProvider) + getMockTaskResources(), mockWhitelistConfigProvider, taskApplicationConfigProvider) assert.EqualError(t, err, "Requested CPU default [1536Mi] is greater than current limit set in the platform configuration [200m]. Please contact Flyte Admins to change these limits or consult the configuration") request.Spec.Template.Target = &core.TaskTemplate_K8SPod{K8SPod: &core.K8SPod{}} err = ValidateTask(context.Background(), request, testutils.GetRepoWithDefaultProject(), - getMockTaskConfigProvider(), mockWhitelistConfigProvider, taskApplicationConfigProvider) + getMockTaskResources(), mockWhitelistConfigProvider, taskApplicationConfigProvider) assert.EqualError(t, err, "invalid TaskSpecification, pod tasks should specify their target as a K8sPod with a defined pod spec") resourceList := corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1.5Gi")} podSpec := &corev1.PodSpec{Containers: []corev1.Container{{Resources: corev1.ResourceRequirements{Requests: resourceList}}}} request.Spec.Template.Target = &core.TaskTemplate_K8SPod{K8SPod: &core.K8SPod{PodSpec: transformStructToStructPB(t, podSpec)}} err = ValidateTask(context.Background(), request, testutils.GetRepoWithDefaultProject(), - getMockTaskConfigProvider(), mockWhitelistConfigProvider, taskApplicationConfigProvider) + getMockTaskResources(), mockWhitelistConfigProvider, taskApplicationConfigProvider) assert.EqualError(t, err, "Requested CPU default [1536Mi] is greater than current limit set in the platform configuration [200m]. 
Please contact Flyte Admins to change these limits or consult the configuration") resourceList = corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("200m")} podSpec = &corev1.PodSpec{Containers: []corev1.Container{{Resources: corev1.ResourceRequirements{Requests: resourceList}}}} request.Spec.Template.Target = &core.TaskTemplate_K8SPod{K8SPod: &core.K8SPod{PodSpec: transformStructToStructPB(t, podSpec)}} err = ValidateTask(context.Background(), request, testutils.GetRepoWithDefaultProject(), - getMockTaskConfigProvider(), mockWhitelistConfigProvider, taskApplicationConfigProvider) + getMockTaskResources(), mockWhitelistConfigProvider, taskApplicationConfigProvider) assert.Nil(t, err) } @@ -85,14 +85,14 @@ func TestValidateTaskEmptyProject(t *testing.T) { request := testutils.GetValidTaskRequest() request.Id.Project = "" err := ValidateTask(context.Background(), request, testutils.GetRepoWithDefaultProject(), - getMockTaskConfigProvider(), mockWhitelistConfigProvider, taskApplicationConfigProvider) + getMockTaskResources(), mockWhitelistConfigProvider, taskApplicationConfigProvider) assert.EqualError(t, err, "missing project") } func TestValidateTaskInvalidProjectAndDomain(t *testing.T) { request := testutils.GetValidTaskRequest() err := ValidateTask(context.Background(), request, testutils.GetRepoWithDefaultProjectAndErr(errors.New("foo")), - getMockTaskConfigProvider(), mockWhitelistConfigProvider, taskApplicationConfigProvider) + getMockTaskResources(), mockWhitelistConfigProvider, taskApplicationConfigProvider) assert.EqualError(t, err, "failed to validate that project [project] and domain [domain] are registered, err: [foo]") } @@ -100,7 +100,7 @@ func TestValidateTaskEmptyDomain(t *testing.T) { request := testutils.GetValidTaskRequest() request.Id.Domain = "" err := ValidateTask(context.Background(), request, testutils.GetRepoWithDefaultProject(), - getMockTaskConfigProvider(), mockWhitelistConfigProvider, taskApplicationConfigProvider) + getMockTaskResources(), mockWhitelistConfigProvider, taskApplicationConfigProvider) assert.EqualError(t, err, "missing domain") } @@ -108,7 +108,7 @@ func TestValidateTaskEmptyName(t *testing.T) { request := testutils.GetValidTaskRequest() request.Id.Name = "" err := ValidateTask(context.Background(), request, testutils.GetRepoWithDefaultProject(), - getMockTaskConfigProvider(), mockWhitelistConfigProvider, taskApplicationConfigProvider) + getMockTaskResources(), mockWhitelistConfigProvider, taskApplicationConfigProvider) assert.EqualError(t, err, "missing name") } @@ -116,7 +116,7 @@ func TestValidateTaskEmptyVersion(t *testing.T) { request := testutils.GetValidTaskRequest() request.Id.Version = "" err := ValidateTask(context.Background(), request, testutils.GetRepoWithDefaultProject(), - getMockTaskConfigProvider(), mockWhitelistConfigProvider, taskApplicationConfigProvider) + getMockTaskResources(), mockWhitelistConfigProvider, taskApplicationConfigProvider) assert.EqualError(t, err, "missing version") } @@ -124,7 +124,7 @@ func TestValidateTaskEmptyType(t *testing.T) { request := testutils.GetValidTaskRequest() request.Spec.Template.Type = "" err := ValidateTask(context.Background(), request, testutils.GetRepoWithDefaultProject(), - getMockTaskConfigProvider(), mockWhitelistConfigProvider, taskApplicationConfigProvider) + getMockTaskResources(), mockWhitelistConfigProvider, taskApplicationConfigProvider) assert.EqualError(t, err, "missing type") } @@ -132,7 +132,7 @@ func TestValidateTaskEmptyMetadata(t *testing.T) { request := 
testutils.GetValidTaskRequest() request.Spec.Template.Metadata = nil err := ValidateTask(context.Background(), request, testutils.GetRepoWithDefaultProject(), - getMockTaskConfigProvider(), mockWhitelistConfigProvider, taskApplicationConfigProvider) + getMockTaskResources(), mockWhitelistConfigProvider, taskApplicationConfigProvider) assert.EqualError(t, err, "missing metadata") } @@ -140,7 +140,7 @@ func TestValidateTaskEmptyRuntimeVersion(t *testing.T) { request := testutils.GetValidTaskRequest() request.Spec.Template.Metadata.Runtime.Version = "" err := ValidateTask(context.Background(), request, testutils.GetRepoWithDefaultProject(), - getMockTaskConfigProvider(), mockWhitelistConfigProvider, taskApplicationConfigProvider) + getMockTaskResources(), mockWhitelistConfigProvider, taskApplicationConfigProvider) assert.EqualError(t, err, "missing runtime version") } @@ -148,7 +148,7 @@ func TestValidateTaskEmptyTypedInterface(t *testing.T) { request := testutils.GetValidTaskRequest() request.Spec.Template.Interface = nil err := ValidateTask(context.Background(), request, testutils.GetRepoWithDefaultProject(), - getMockTaskConfigProvider(), mockWhitelistConfigProvider, taskApplicationConfigProvider) + getMockTaskResources(), mockWhitelistConfigProvider, taskApplicationConfigProvider) assert.EqualError(t, err, "missing typed interface") } @@ -156,7 +156,7 @@ func TestValidateTaskEmptyContainer(t *testing.T) { request := testutils.GetValidTaskRequest() request.Spec.Template.Target = nil err := ValidateTask(context.Background(), request, testutils.GetRepoWithDefaultProject(), - getMockTaskConfigProvider(), mockWhitelistConfigProvider, taskApplicationConfigProvider) + getMockTaskResources(), mockWhitelistConfigProvider, taskApplicationConfigProvider) assert.Nil(t, err) } @@ -164,7 +164,7 @@ func TestValidateTaskEmptyImage(t *testing.T) { request := testutils.GetValidTaskRequest() request.Spec.Template.GetContainer().Image = "" err := ValidateTask(context.Background(), request, testutils.GetRepoWithDefaultProject(), - getMockTaskConfigProvider(), mockWhitelistConfigProvider, taskApplicationConfigProvider) + getMockTaskResources(), mockWhitelistConfigProvider, taskApplicationConfigProvider) assert.EqualError(t, err, "missing image") } diff --git a/pkg/manager/impl/validation/validation.go b/pkg/manager/impl/validation/validation.go index 18e13ec44..e6c7cfae2 100644 --- a/pkg/manager/impl/validation/validation.go +++ b/pkg/manager/impl/validation/validation.go @@ -1,6 +1,7 @@ package validation import ( + "net/url" "strconv" "strings" @@ -22,6 +23,9 @@ var entityToResourceType = map[common.Entity]core.ResourceType{ common.LaunchPlan: core.ResourceType_LAUNCH_PLAN, } +// See https://www.rfc-editor.org/rfc/rfc3986#section-2.2 +var uriReservedChars = "!*'();:@&=+$,/?#[]" + func ValidateEmptyStringField(field, fieldName string) error { if field == "" { return shared.GetMissingArgumentError(fieldName) @@ -132,6 +136,10 @@ func ValidateVersion(version string) error { if err := ValidateEmptyStringField(version, shared.Version); err != nil { return err } + sanitizedVersion := url.QueryEscape(version) + if !strings.EqualFold(sanitizedVersion, version) { + return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "version [%s] must be url safe, cannot contains chars [%s]", version, uriReservedChars) + } return nil } @@ -151,6 +159,25 @@ func ValidateResourceListRequest(request admin.ResourceListRequest) error { return nil } +func ValidateDescriptionEntityListRequest(request admin.DescriptionEntityListRequest) 
error { + if request.Id == nil { + return shared.GetMissingArgumentError(shared.ID) + } + if err := ValidateEmptyStringField(request.Id.Project, shared.Project); err != nil { + return err + } + if err := ValidateEmptyStringField(request.Id.Domain, shared.Domain); err != nil { + return err + } + if err := ValidateEmptyStringField(request.Id.Name, shared.Name); err != nil { + return err + } + if err := ValidateLimit(request.Limit); err != nil { + return err + } + return nil +} + func ValidateActiveLaunchPlanRequest(request admin.ActiveLaunchPlanRequest) error { if err := ValidateEmptyStringField(request.Id.Project, shared.Project); err != nil { return err @@ -190,6 +217,16 @@ func ValidateNamedEntityIdentifierListRequest(request admin.NamedEntityIdentifie return nil } +func ValidateDescriptionEntityGetRequest(request admin.ObjectGetRequest) error { + if err := ValidateResourceType(request.Id.ResourceType); err != nil { + return err + } + if err := ValidateIdentifierFieldsSet(request.Id); err != nil { + return err + } + return nil +} + func validateLiteralMap(inputMap *core.LiteralMap, fieldName string) error { if inputMap != nil && len(inputMap.Literals) > 0 { for name, fixedInput := range inputMap.Literals { diff --git a/pkg/manager/impl/validation/validation_test.go b/pkg/manager/impl/validation/validation_test.go index 417ad9e5e..80830ce03 100644 --- a/pkg/manager/impl/validation/validation_test.go +++ b/pkg/manager/impl/validation/validation_test.go @@ -1,6 +1,7 @@ package validation import ( + "fmt" "testing" "time" @@ -94,9 +95,94 @@ func TestValidateNamedEntityIdentifierListRequest(t *testing.T) { })) } +func TestValidateDescriptionEntityIdentifierGetRequest(t *testing.T) { + assert.Nil(t, ValidateDescriptionEntityGetRequest(admin.ObjectGetRequest{ + Id: &core.Identifier{ + ResourceType: core.ResourceType_WORKFLOW, + Project: "project", + Domain: "domain", + Name: "name", + Version: "v1", + }, + })) + + assert.NotNil(t, ValidateDescriptionEntityGetRequest(admin.ObjectGetRequest{ + Id: &core.Identifier{ + Project: "project", + }, + })) + + assert.NotNil(t, ValidateDescriptionEntityGetRequest(admin.ObjectGetRequest{ + Id: &core.Identifier{ + ResourceType: core.ResourceType_WORKFLOW, + Project: "project", + }, + })) +} + +func TestValidateDescriptionEntityListRequest(t *testing.T) { + assert.Nil(t, ValidateDescriptionEntityListRequest(admin.DescriptionEntityListRequest{ + ResourceType: core.ResourceType_WORKFLOW, + Id: &admin.NamedEntityIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + Limit: 1, + })) + + assert.NotNil(t, ValidateDescriptionEntityListRequest(admin.DescriptionEntityListRequest{ + ResourceType: core.ResourceType_WORKFLOW, + Id: &admin.NamedEntityIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + })) + + assert.NotNil(t, ValidateDescriptionEntityListRequest(admin.DescriptionEntityListRequest{ + Id: nil, + })) + + assert.NotNil(t, ValidateDescriptionEntityListRequest(admin.DescriptionEntityListRequest{ + ResourceType: core.ResourceType_WORKFLOW, + Id: nil, + })) + + assert.NotNil(t, ValidateDescriptionEntityListRequest(admin.DescriptionEntityListRequest{ + ResourceType: core.ResourceType_WORKFLOW, + Id: &admin.NamedEntityIdentifier{ + Domain: "domain", + }, + })) + + assert.NotNil(t, ValidateDescriptionEntityListRequest(admin.DescriptionEntityListRequest{ + ResourceType: core.ResourceType_WORKFLOW, + Id: &admin.NamedEntityIdentifier{ + Project: "project", + }, + })) + + assert.NotNil(t, 
ValidateDescriptionEntityListRequest(admin.DescriptionEntityListRequest{ + ResourceType: core.ResourceType_WORKFLOW, + Id: &admin.NamedEntityIdentifier{ + Project: "project", + Domain: "domain", + }, + })) +} + func TestValidateVersion(t *testing.T) { err := ValidateVersion("") assert.EqualError(t, err, "missing version") + + t.Run("url safe versions only", func(t *testing.T) { + assert.NoError(t, ValidateVersion("Foo123")) + for _, reservedChar := range uriReservedChars { + invalidVersion := fmt.Sprintf("foo%c", reservedChar) + assert.NotNil(t, ValidateVersion(invalidVersion)) + } + }) } func TestValidateListTaskRequest(t *testing.T) { diff --git a/pkg/manager/impl/workflow_manager.go b/pkg/manager/impl/workflow_manager.go index d95216d24..09d6a0db2 100644 --- a/pkg/manager/impl/workflow_manager.go +++ b/pkg/manager/impl/workflow_manager.go @@ -161,16 +161,15 @@ func (w *WorkflowManager) CreateWorkflow( } // Assert that a matching workflow doesn't already exist before uploading the workflow closure. - existingMatchingWorkflow, err := util.GetWorkflowModel(ctx, w.db, *request.Id) + existingWorkflowModel, err := util.GetWorkflowModel(ctx, w.db, *request.Id) // Check that no identical or conflicting workflows exist. if err == nil { // A workflow's structure is uniquely defined by its collection of nodes. - if bytes.Equal(workflowDigest, existingMatchingWorkflow.Digest) { - return nil, errors.NewFlyteAdminErrorf( - codes.AlreadyExists, "identical workflow already exists with id %v", request.Id) + if bytes.Equal(workflowDigest, existingWorkflowModel.Digest) { + return nil, errors.NewWorkflowExistsIdenticalStructureError(ctx, &request) } - return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "workflow with different structure already exists with id %v", request.Id) + // A workflow exists with different structure + return nil, errors.NewWorkflowExistsDifferentStructureError(ctx, &request) } else if flyteAdminError, ok := err.(errors.FlyteAdminError); !ok || flyteAdminError.Code() != codes.NotFound { logger.Debugf(ctx, "Failed to get workflow for comparison in CreateWorkflow with ID [%+v] with err %v", request.Id, err) @@ -203,11 +202,21 @@ func (w *WorkflowManager) CreateWorkflow( finalizedRequest, remoteClosureDataRef.String(), err) return nil, err } - if err = w.db.WorkflowRepo().Create(ctx, workflowModel); err != nil { + descriptionModel, err := transformers.CreateDescriptionEntityModel(request.Spec.Description, *request.Id) + if err != nil { + logger.Errorf(ctx, + "Failed to transform description model [%+v] with err: %v", request.Spec.Description, err) + return nil, err + } + if descriptionModel != nil { + workflowModel.ShortDescription = descriptionModel.ShortDescription + } + if err = w.db.WorkflowRepo().Create(ctx, workflowModel, descriptionModel); err != nil { logger.Infof(ctx, "Failed to create workflow model [%+v] with err %v", request.Id, err) return nil, err } w.metrics.TypedInterfaceSizeBytes.Observe(float64(len(workflowModel.TypedInterface))) + return &admin.WorkflowCreateResponse{}, nil } diff --git a/pkg/manager/impl/workflow_manager_test.go b/pkg/manager/impl/workflow_manager_test.go index 1fb0647f2..cc30e8aaf 100644 --- a/pkg/manager/impl/workflow_manager_test.go +++ b/pkg/manager/impl/workflow_manager_test.go @@ -15,6 +15,7 @@ import ( "github.com/flyteorg/flyteadmin/pkg/repositories/models" "github.com/golang/protobuf/proto" + flyteErrors "github.com/flyteorg/flyteadmin/pkg/errors" runtimeInterfaces "github.com/flyteorg/flyteadmin/pkg/runtime/interfaces" 
runtimeMocks "github.com/flyteorg/flyteadmin/pkg/runtime/mocks" workflowengineInterfaces "github.com/flyteorg/flyteadmin/pkg/workflowengine/interfaces" @@ -131,7 +132,7 @@ func TestSetWorkflowDefaults(t *testing.T) { func TestCreateWorkflow(t *testing.T) { repository := getMockRepository(!returnWorkflowOnGet) var createCalled bool - repository.WorkflowRepo().(*repositoryMocks.MockWorkflowRepo).SetCreateCallback(func(input models.Workflow) error { + repository.WorkflowRepo().(*repositoryMocks.MockWorkflowRepo).SetCreateCallback(func(input models.Workflow, descriptionEntity *models.DescriptionEntity) error { assert.Equal(t, []byte{ 0x2c, 0x69, 0x58, 0x2f, 0xd5, 0x3e, 0x68, 0x7d, 0x5, 0x8e, 0xd9, 0xc8, 0x7d, 0xbd, 0xd1, 0xc7, 0xa7, 0x69, 0xeb, 0x2e, 0x54, 0x6, 0x3e, 0x67, 0x82, 0xcd, 0x54, 0x7a, 0x91, 0xb3, 0x35, 0x81}, input.Digest) @@ -149,6 +150,13 @@ func TestCreateWorkflow(t *testing.T) { expectedResponse := &admin.WorkflowCreateResponse{} assert.Equal(t, expectedResponse, response) assert.True(t, createCalled) + + repository.WorkflowRepo().(*repositoryMocks.MockWorkflowRepo).SetCreateCallback(func(input models.Workflow, descriptionEntity *models.DescriptionEntity) error { + return errors.New("failed to insert record into workflow table") + }) + response, err = workflowManager.CreateWorkflow(context.Background(), request) + assert.Error(t, err) + assert.Nil(t, response) } func TestCreateWorkflow_ValidationError(t *testing.T) { @@ -177,12 +185,11 @@ func TestCreateWorkflow_ExistingWorkflow(t *testing.T) { getMockWorkflowConfigProvider(), getMockWorkflowCompiler(), mockStorageClient, storagePrefix, mockScope.NewTestScope()) request := testutils.GetWorkflowRequest() response, err := workflowManager.CreateWorkflow(context.Background(), request) - assert.EqualError(t, err, "workflow with different structure already exists with id "+ - "resource_type:WORKFLOW project:\"project\" domain:\"domain\" name:\"name\" version:\"version\" ") + assert.EqualError(t, err, "workflow with different structure already exists") assert.Nil(t, response) } -func TestCreateWorkflow_ExistingWorkflow_NotIdentical(t *testing.T) { +func TestCreateWorkflow_ExistingWorkflow_Different(t *testing.T) { mockStorageClient := commonMocks.GetMockStorageClient() mockStorageClient.ComposedProtobufStore.(*commonMocks.TestDataStore).ReadProtobufCb = @@ -196,8 +203,9 @@ func TestCreateWorkflow_ExistingWorkflow_NotIdentical(t *testing.T) { request := testutils.GetWorkflowRequest() response, err := workflowManager.CreateWorkflow(context.Background(), request) - assert.EqualError(t, err, "workflow with different structure already exists with id "+ - "resource_type:WORKFLOW project:\"project\" domain:\"domain\" name:\"name\" version:\"version\" ") + assert.EqualError(t, err, "workflow with different structure already exists") + flyteErr := err.(flyteErrors.FlyteAdminError) + assert.Equal(t, codes.InvalidArgument, flyteErr.Code()) assert.Nil(t, response) } @@ -244,7 +252,7 @@ func TestCreateWorkflow_CompileWorkflowError(t *testing.T) { func TestCreateWorkflow_DatabaseError(t *testing.T) { repository := getMockRepository(!returnWorkflowOnGet) expectedErr := errors.New("expected error") - workflowCreateFunc := func(input models.Workflow) error { + workflowCreateFunc := func(input models.Workflow, descriptionEntity *models.DescriptionEntity) error { return expectedErr } diff --git a/pkg/manager/interfaces/description_entity.go b/pkg/manager/interfaces/description_entity.go new file mode 100644 index 000000000..88ac338c2 --- /dev/null +++ 
b/pkg/manager/interfaces/description_entity.go @@ -0,0 +1,13 @@ +package interfaces + +import ( + "context" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" +) + +// DescriptionEntityInterface for managing DescriptionEntity +type DescriptionEntityInterface interface { + GetDescriptionEntity(ctx context.Context, request admin.ObjectGetRequest) (*admin.DescriptionEntity, error) + ListDescriptionEntity(ctx context.Context, request admin.DescriptionEntityListRequest) (*admin.DescriptionEntityList, error) +} diff --git a/pkg/manager/interfaces/metrics.go b/pkg/manager/interfaces/metrics.go new file mode 100644 index 000000000..d726cdc99 --- /dev/null +++ b/pkg/manager/interfaces/metrics.go @@ -0,0 +1,15 @@ +package interfaces + +import ( + "context" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" +) + +//go:generate mockery -name=MetricsInterface -output=../mocks -case=underscore + +// Interface for managing Flyte execution metrics +type MetricsInterface interface { + GetExecutionMetrics(ctx context.Context, request admin.WorkflowExecutionGetMetricsRequest) ( + *admin.WorkflowExecutionGetMetricsResponse, error) +} diff --git a/pkg/manager/interfaces/signal.go b/pkg/manager/interfaces/signal.go new file mode 100644 index 000000000..0547e439d --- /dev/null +++ b/pkg/manager/interfaces/signal.go @@ -0,0 +1,16 @@ +package interfaces + +import ( + "context" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" +) + +//go:generate mockery -name=SignalInterface -output=../mocks -case=underscore + +// Interface for managing Flyte Signals +type SignalInterface interface { + GetOrCreateSignal(ctx context.Context, request admin.SignalGetOrCreateRequest) (*admin.Signal, error) + ListSignals(ctx context.Context, request admin.SignalListRequest) (*admin.SignalList, error) + SetSignal(ctx context.Context, request admin.SignalSetRequest) (*admin.SignalSetResponse, error) +} diff --git a/pkg/manager/mocks/metrics_interface.go b/pkg/manager/mocks/metrics_interface.go new file mode 100644 index 000000000..2e292593e --- /dev/null +++ b/pkg/manager/mocks/metrics_interface.go @@ -0,0 +1,57 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + admin "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + + mock "github.com/stretchr/testify/mock" +) + +// MetricsInterface is an autogenerated mock type for the MetricsInterface type +type MetricsInterface struct { + mock.Mock +} + +type MetricsInterface_GetExecutionMetrics struct { + *mock.Call +} + +func (_m MetricsInterface_GetExecutionMetrics) Return(_a0 *admin.WorkflowExecutionGetMetricsResponse, _a1 error) *MetricsInterface_GetExecutionMetrics { + return &MetricsInterface_GetExecutionMetrics{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *MetricsInterface) OnGetExecutionMetrics(ctx context.Context, request admin.WorkflowExecutionGetMetricsRequest) *MetricsInterface_GetExecutionMetrics { + c_call := _m.On("GetExecutionMetrics", ctx, request) + return &MetricsInterface_GetExecutionMetrics{Call: c_call} +} + +func (_m *MetricsInterface) OnGetExecutionMetricsMatch(matchers ...interface{}) *MetricsInterface_GetExecutionMetrics { + c_call := _m.On("GetExecutionMetrics", matchers...) 
+ return &MetricsInterface_GetExecutionMetrics{Call: c_call} +} + +// GetExecutionMetrics provides a mock function with given fields: ctx, request +func (_m *MetricsInterface) GetExecutionMetrics(ctx context.Context, request admin.WorkflowExecutionGetMetricsRequest) (*admin.WorkflowExecutionGetMetricsResponse, error) { + ret := _m.Called(ctx, request) + + var r0 *admin.WorkflowExecutionGetMetricsResponse + if rf, ok := ret.Get(0).(func(context.Context, admin.WorkflowExecutionGetMetricsRequest) *admin.WorkflowExecutionGetMetricsResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.WorkflowExecutionGetMetricsResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, admin.WorkflowExecutionGetMetricsRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/pkg/manager/mocks/signal_interface.go b/pkg/manager/mocks/signal_interface.go new file mode 100644 index 000000000..51e8b6636 --- /dev/null +++ b/pkg/manager/mocks/signal_interface.go @@ -0,0 +1,139 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + admin "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + + mock "github.com/stretchr/testify/mock" +) + +// SignalInterface is an autogenerated mock type for the SignalInterface type +type SignalInterface struct { + mock.Mock +} + +type SignalInterface_GetOrCreateSignal struct { + *mock.Call +} + +func (_m SignalInterface_GetOrCreateSignal) Return(_a0 *admin.Signal, _a1 error) *SignalInterface_GetOrCreateSignal { + return &SignalInterface_GetOrCreateSignal{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *SignalInterface) OnGetOrCreateSignal(ctx context.Context, request admin.SignalGetOrCreateRequest) *SignalInterface_GetOrCreateSignal { + c_call := _m.On("GetOrCreateSignal", ctx, request) + return &SignalInterface_GetOrCreateSignal{Call: c_call} +} + +func (_m *SignalInterface) OnGetOrCreateSignalMatch(matchers ...interface{}) *SignalInterface_GetOrCreateSignal { + c_call := _m.On("GetOrCreateSignal", matchers...) + return &SignalInterface_GetOrCreateSignal{Call: c_call} +} + +// GetOrCreateSignal provides a mock function with given fields: ctx, request +func (_m *SignalInterface) GetOrCreateSignal(ctx context.Context, request admin.SignalGetOrCreateRequest) (*admin.Signal, error) { + ret := _m.Called(ctx, request) + + var r0 *admin.Signal + if rf, ok := ret.Get(0).(func(context.Context, admin.SignalGetOrCreateRequest) *admin.Signal); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.Signal) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, admin.SignalGetOrCreateRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type SignalInterface_ListSignals struct { + *mock.Call +} + +func (_m SignalInterface_ListSignals) Return(_a0 *admin.SignalList, _a1 error) *SignalInterface_ListSignals { + return &SignalInterface_ListSignals{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *SignalInterface) OnListSignals(ctx context.Context, request admin.SignalListRequest) *SignalInterface_ListSignals { + c_call := _m.On("ListSignals", ctx, request) + return &SignalInterface_ListSignals{Call: c_call} +} + +func (_m *SignalInterface) OnListSignalsMatch(matchers ...interface{}) *SignalInterface_ListSignals { + c_call := _m.On("ListSignals", matchers...) 
+ return &SignalInterface_ListSignals{Call: c_call} +} + +// ListSignals provides a mock function with given fields: ctx, request +func (_m *SignalInterface) ListSignals(ctx context.Context, request admin.SignalListRequest) (*admin.SignalList, error) { + ret := _m.Called(ctx, request) + + var r0 *admin.SignalList + if rf, ok := ret.Get(0).(func(context.Context, admin.SignalListRequest) *admin.SignalList); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.SignalList) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, admin.SignalListRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type SignalInterface_SetSignal struct { + *mock.Call +} + +func (_m SignalInterface_SetSignal) Return(_a0 *admin.SignalSetResponse, _a1 error) *SignalInterface_SetSignal { + return &SignalInterface_SetSignal{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *SignalInterface) OnSetSignal(ctx context.Context, request admin.SignalSetRequest) *SignalInterface_SetSignal { + c_call := _m.On("SetSignal", ctx, request) + return &SignalInterface_SetSignal{Call: c_call} +} + +func (_m *SignalInterface) OnSetSignalMatch(matchers ...interface{}) *SignalInterface_SetSignal { + c_call := _m.On("SetSignal", matchers...) + return &SignalInterface_SetSignal{Call: c_call} +} + +// SetSignal provides a mock function with given fields: ctx, request +func (_m *SignalInterface) SetSignal(ctx context.Context, request admin.SignalSetRequest) (*admin.SignalSetResponse, error) { + ret := _m.Called(ctx, request) + + var r0 *admin.SignalSetResponse + if rf, ok := ret.Get(0).(func(context.Context, admin.SignalSetRequest) *admin.SignalSetResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.SignalSetResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, admin.SignalSetRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/pkg/manager/mocks/workflow.go b/pkg/manager/mocks/workflow.go index d1f55750e..5055b2b4c 100644 --- a/pkg/manager/mocks/workflow.go +++ b/pkg/manager/mocks/workflow.go @@ -7,9 +7,11 @@ import ( ) type CreateWorkflowFunc func(ctx context.Context, request admin.WorkflowCreateRequest) (*admin.WorkflowCreateResponse, error) +type GetWorkflowFunc func(ctx context.Context, request admin.ObjectGetRequest) (*admin.Workflow, error) type MockWorkflowManager struct { createWorkflowFunc CreateWorkflowFunc + getWorkflowFunc GetWorkflowFunc } func (r *MockWorkflowManager) SetCreateCallback(createFunction CreateWorkflowFunc) { @@ -30,8 +32,15 @@ func (r *MockWorkflowManager) ListWorkflows(ctx context.Context, return nil, nil } +func (r *MockWorkflowManager) SetGetCallback(getFunction GetWorkflowFunc) { + r.getWorkflowFunc = getFunction +} + func (r *MockWorkflowManager) GetWorkflow( ctx context.Context, request admin.ObjectGetRequest) (*admin.Workflow, error) { + if r.getWorkflowFunc != nil { + return r.getWorkflowFunc(ctx, request) + } return nil, nil } diff --git a/pkg/repositories/config/migration_models.go b/pkg/repositories/config/migration_models.go index b46b93858..4c0144697 100644 --- a/pkg/repositories/config/migration_models.go +++ b/pkg/repositories/config/migration_models.go @@ -10,6 +10,9 @@ import ( IMPORTANT: You'll observe several models are redefined below with named index tags *omitted*. This is because postgres requires that index names be unique across *all* tables. 
If you modify Task, Execution, NodeExecution or TaskExecution models in code be sure to update the appropriate duplicate definitions here. + That is, in the actual code, it makes more sense to re-use structs, like how NodeExecutionKey is in both NodeExecution + and in TaskExecution. But simply re-using in migrations would result in indices with the same name. + In the new model where all models are replicated in each function, this is not an issue. */ type TaskKey struct { diff --git a/pkg/repositories/config/migrations.go b/pkg/repositories/config/migrations.go index 7c4c9214d..a79a58b25 100644 --- a/pkg/repositories/config/migrations.go +++ b/pkg/repositories/config/migrations.go @@ -3,6 +3,11 @@ package config import ( "database/sql" "fmt" + "time" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytestdlib/storage" "github.com/flyteorg/flyteadmin/pkg/repositories/models" schedulerModels "github.com/flyteorg/flyteadmin/scheduler/repositories/models" @@ -14,10 +19,10 @@ import ( var ( tables = []string{"execution_events", "executions", "launch_plans", "named_entity_metadata", "node_execution_events", "node_executions", "projects", "resources", "schedulable_entities", - "schedule_entities_snapshots", "task_executions", "tasks", "workflows"} + "schedule_entities_snapshots", "task_executions", "tasks", "workflows", "description_entities"} ) -var Migrations = []*gormigrate.Migration{ +var LegacyMigrations = []*gormigrate.Migration{ // Create projects table. { ID: "2019-05-22-projects", @@ -390,8 +395,562 @@ var Migrations = []*gormigrate.Migration{ return tx.Model(&models.Execution{}).Migrator().DropIndex(&models.Execution{}, "idx_executions_created_at") }, }, + // Create description entities table + { + ID: "2022-09-13-description-entities", + Migrate: func(tx *gorm.DB) error { + return tx.AutoMigrate(&models.DescriptionEntity{}) + }, + Rollback: func(tx *gorm.DB) error { + return tx.Migrator().DropTable("description_entities") + }, + }, + // Modify the tasks table, if necessary + { + ID: "2020-09-13-task-short_description", + Migrate: func(tx *gorm.DB) error { + return tx.Exec("ALTER TABLE tasks ADD COLUMN IF NOT EXISTS short_description varchar(4000)").Error + }, + Rollback: func(tx *gorm.DB) error { + return tx.Exec("ALTER TABLE tasks DROP COLUMN IF EXISTS short_description").Error + }, + }, + // Modify the workflows table, if necessary + { + ID: "2020-09-13-workflow-short_description", + Migrate: func(tx *gorm.DB) error { + return tx.Exec("ALTER TABLE workflows ADD COLUMN IF NOT EXISTS short_description varchar(4000)").Error + }, + Rollback: func(tx *gorm.DB) error { + return tx.Exec("ALTER TABLE workflows DROP COLUMN IF EXISTS short_description").Error + }, + }, + // Create signals table. + { + ID: "2022-04-11-signals", + Migrate: func(tx *gorm.DB) error { + return tx.AutoMigrate(&models.Signal{}) + }, + Rollback: func(tx *gorm.DB) error { + return tx.Migrator().DropTable("signals") + }, + }, + // Add the launch_type resource to the execution model + { + ID: "2022-12-09-execution-launch-type", + Migrate: func(tx *gorm.DB) error { + return tx.AutoMigrate(&models.Execution{}) + }, + Rollback: func(tx *gorm.DB) error { + return tx.Model(&models.Execution{}).Migrator().DropColumn(&models.Execution{}, "launch_entity") + }, + }, } +var NoopMigrations = []*gormigrate.Migration{ + /* The following is a series of Postgres specific migrations. 
They should mirror the state + of the database as of 2023 March. The rollback is a noop for everything because the migration itself should + be a noop. + */ + + { + ID: "pg-noop-2023-03-31-noop-project-3", + Migrate: func(tx *gorm.DB) error { + type Project struct { + ID uint `gorm:"index;autoIncrement;not null"` + CreatedAt time.Time `gorm:"type:time"` + UpdatedAt time.Time `gorm:"type:time"` + DeletedAt *time.Time `gorm:"index"` + Identifier string `gorm:"primary_key"` + Name string `valid:"length(0|255)"` // Human-readable name, not a unique identifier. + Description string `gorm:"type:varchar(300)"` + Labels []byte + // GORM doesn't save the zero value for ints, so we use a pointer for the State field + State *int32 `gorm:"default:0;index"` + } + return tx.AutoMigrate(&Project{}) + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, + // ALTER TABLE "projects" ALTER COLUMN "id" DROP NOT NULL otherwise. + + { + ID: "pg-noop-2023-03-31-noop-task-2", + Migrate: func(tx *gorm.DB) error { + type Task struct { + ID uint `gorm:"index;autoIncrement;not null"` + CreatedAt time.Time `gorm:"type:time"` + UpdatedAt time.Time `gorm:"type:time"` + DeletedAt *time.Time `gorm:"index"` + Project string `gorm:"primary_key;index:task_project_domain_name_idx;index:task_project_domain_idx" valid:"length(0|255)"` + Domain string `gorm:"primary_key;index:task_project_domain_name_idx;index:task_project_domain_idx" valid:"length(0|255)"` + Name string `gorm:"primary_key;index:task_project_domain_name_idx" valid:"length(0|255)"` + Version string `gorm:"primary_key" valid:"length(0|255)"` + Closure []byte `gorm:"not null"` + // Hash of the compiled task closure + Digest []byte + // Task type (also stored in the closure put promoted as a column for filtering). + Type string `gorm:"" valid:"length(0|255)"` + // ShortDescription for the task. + ShortDescription string + } + return tx.AutoMigrate(&Task{}) + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, + + { + ID: "pg-noop-2023-03-31-noop-workflow", + Migrate: func(tx *gorm.DB) error { + type Workflow struct { + ID uint `gorm:"index;autoIncrement;not null"` + CreatedAt time.Time `gorm:"type:time"` + UpdatedAt time.Time `gorm:"type:time"` + DeletedAt *time.Time `gorm:"index"` + Project string `gorm:"primary_key;index:workflow_project_domain_name_idx;index:workflow_project_domain_idx" valid:"length(0|255)"` + Domain string `gorm:"primary_key;index:workflow_project_domain_name_idx;index:workflow_project_domain_idx" valid:"length(0|255)"` + Name string `gorm:"primary_key;index:workflow_project_domain_name_idx" valid:"length(0|255)"` + Version string `gorm:"primary_key"` + TypedInterface []byte + RemoteClosureIdentifier string `gorm:"not null" valid:"length(0|255)"` + // Hash of the compiled workflow closure + Digest []byte + // ShortDescription for the workflow. 
+ ShortDescription string + } + return tx.AutoMigrate(&Workflow{}) + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, + + { + ID: "pg-noop-2023-03-31-noop-launchplan", + Migrate: func(tx *gorm.DB) error { + type LaunchPlanScheduleType string + + type LaunchPlan struct { + ID uint `gorm:"index;autoIncrement;not null"` + CreatedAt time.Time `gorm:"type:time"` + UpdatedAt time.Time `gorm:"type:time"` + DeletedAt *time.Time `gorm:"index"` + Project string `gorm:"primary_key;index:lp_project_domain_name_idx,lp_project_domain_idx" valid:"length(0|255)"` + Domain string `gorm:"primary_key;index:lp_project_domain_name_idx,lp_project_domain_idx" valid:"length(0|255)"` + Name string `gorm:"primary_key;index:lp_project_domain_name_idx" valid:"length(0|255)"` + Version string `gorm:"primary_key" valid:"length(0|255)"` + Spec []byte `gorm:"not null"` + WorkflowID uint `gorm:"index"` + Closure []byte `gorm:"not null"` + // GORM doesn't save the zero value for ints, so we use a pointer for the State field + State *int32 `gorm:"default:0"` + // Hash of the launch plan + Digest []byte + ScheduleType LaunchPlanScheduleType + } + return tx.AutoMigrate(&LaunchPlan{}) + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, + + { + ID: "pg-noop-2023-03-31-noop-namedentitymetadata", + Migrate: func(tx *gorm.DB) error { + type NamedEntityMetadata struct { + ID uint `gorm:"index;autoIncrement;not null"` + CreatedAt time.Time `gorm:"type:time"` + UpdatedAt time.Time `gorm:"type:time"` + DeletedAt *time.Time `gorm:"index"` + ResourceType core.ResourceType `gorm:"primary_key;index:named_entity_metadata_type_project_domain_name_idx" valid:"length(0|255)"` + Project string `gorm:"primary_key;index:named_entity_metadata_type_project_domain_name_idx" valid:"length(0|255)"` + Domain string `gorm:"primary_key;index:named_entity_metadata_type_project_domain_name_idx" valid:"length(0|255)"` + Name string `gorm:"primary_key;index:named_entity_metadata_type_project_domain_name_idx" valid:"length(0|255)"` + Description string `gorm:"type:varchar(300)"` + // GORM doesn't save the zero value for ints, so we use a pointer for the State field + State *int32 `gorm:"default:0"` + } + + return tx.AutoMigrate(&NamedEntityMetadata{}) + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, + + { + ID: "pg-noop-2023-03-31-noop-execution", + Migrate: func(tx *gorm.DB) error { + type ExecutionKey struct { + Project string `gorm:"primary_key;column:execution_project" valid:"length(0|255)"` + Domain string `gorm:"primary_key;column:execution_domain" valid:"length(0|255)"` + Name string `gorm:"primary_key;column:execution_name" valid:"length(0|255)"` + } + + type Execution struct { + ID uint `gorm:"index;autoIncrement;not null"` + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt *time.Time `gorm:"index"` + ExecutionKey + LaunchPlanID uint `gorm:"index"` + WorkflowID uint `gorm:"index"` + TaskID uint `gorm:"index"` + Phase string `valid:"length(0|255)"` + Closure []byte + Spec []byte `gorm:"not null"` + StartedAt *time.Time + // Corresponds to the CreatedAt field in the Execution closure. + // Prefixed with Execution to avoid clashes with gorm.Model CreatedAt + ExecutionCreatedAt *time.Time `gorm:"index:idx_executions_created_at"` + // Corresponds to the UpdatedAt field in the Execution closure + // Prefixed with Execution to avoid clashes with gorm.Model UpdatedAt + ExecutionUpdatedAt *time.Time + Duration time.Duration + // In the case of an aborted execution this string may be non-empty. 
+ // It should be ignored for any other value of phase other than aborted. + AbortCause string `valid:"length(0|255)"` + // Corresponds to the execution mode used to trigger this execution + Mode int32 + // The "parent" execution (if there is one) that is related to this execution. + SourceExecutionID uint + // The parent node execution if this was launched by a node + ParentNodeExecutionID uint + // Cluster where execution was triggered + Cluster string `valid:"length(0|255)"` + // Offloaded location of inputs LiteralMap. These are the inputs evaluated and contain applied defaults. + InputsURI storage.DataReference + // User specified inputs. This map might be incomplete and not include defaults applied + UserInputsURI storage.DataReference + // Execution Error Kind. nullable + ErrorKind *string `gorm:"index"` + // Execution Error Code nullable + ErrorCode *string `valid:"length(0|255)"` + // The user responsible for launching this execution. + // This is also stored in the spec but promoted as a column for filtering. + User string `gorm:"index" valid:"length(0|255)"` + // GORM doesn't save the zero value for ints, so we use a pointer for the State field + State *int32 `gorm:"index;default:0"` + // The resource type of the entity used to launch the execution, one of 'launch_plan' or 'task' + LaunchEntity string + } + + return tx.AutoMigrate(&Execution{}) + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, + + { + ID: "pg-noop-2023-03-31-noop-taskexecution", + Migrate: func(tx *gorm.DB) error { + type TaskKey struct { + Project string `gorm:"primary_key"` + Domain string `gorm:"primary_key"` + Name string `gorm:"primary_key"` + Version string `gorm:"primary_key"` + } + type TaskExecutionKey struct { + TaskKey + Project string `gorm:"primary_key;column:execution_project;index:idx_task_executions_exec"` + Domain string `gorm:"primary_key;column:execution_domain;index:idx_task_executions_exec"` + Name string `gorm:"primary_key;column:execution_name;index:idx_task_executions_exec"` + NodeID string `gorm:"primary_key;index:idx_task_executions_exec;index"` + // *IMPORTANT* This is a pointer to an int in order to allow setting an empty ("0") value according to gorm convention. + // Because RetryAttempt is part of the TaskExecution primary key is should *never* be null. + RetryAttempt *uint32 `gorm:"primary_key;AUTO_INCREMENT:FALSE"` + } + type TaskExecution struct { + ID uint `gorm:"index;autoIncrement;not null"` + CreatedAt time.Time `gorm:"type:time"` + UpdatedAt time.Time `gorm:"type:time"` + DeletedAt *time.Time `gorm:"index"` + TaskExecutionKey + Phase string `gorm:"type:text"` + PhaseVersion uint32 + InputURI string `gorm:"type:text"` + Closure []byte + StartedAt *time.Time + // Corresponds to the CreatedAt field in the TaskExecution closure + // This field is prefixed with TaskExecution because it signifies when + // the execution was createdAt, not to be confused with gorm.Model.CreatedAt + TaskExecutionCreatedAt *time.Time + // Corresponds to the UpdatedAt field in the TaskExecution closure + // This field is prefixed with TaskExecution because it signifies when + // the execution was UpdatedAt, not to be confused with gorm.Model.UpdatedAt + TaskExecutionUpdatedAt *time.Time + Duration time.Duration + // The child node executions (if any) launched by this task execution. 
+ ChildNodeExecution []NodeExecution `gorm:"foreignkey:ParentTaskExecutionID;references:ID"` + } + + return tx.AutoMigrate(&TaskExecution{}) + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, + + { + ID: "pg-noop-2023-03-31-noop-nodeexecution", + Migrate: func(tx *gorm.DB) error { + type ExecutionKey struct { + Project string `gorm:"primary_key;column:execution_project"` + Domain string `gorm:"primary_key;column:execution_domain"` + Name string `gorm:"primary_key;column:execution_name"` + } + + type NodeExecutionKey struct { + ExecutionKey + NodeID string `gorm:"primary_key;index"` + } + type NodeExecution struct { + ID uint `gorm:"index;autoIncrement;not null"` + CreatedAt time.Time `gorm:"type:time"` + UpdatedAt time.Time `gorm:"type:time"` + DeletedAt *time.Time `gorm:"index"` + NodeExecutionKey + // Also stored in the closure, but defined as a separate column because it's useful for filtering and sorting. + Phase string + InputURI string + Closure []byte + StartedAt *time.Time + // Corresponds to the CreatedAt field in the NodeExecution closure + // Prefixed with NodeExecution to avoid clashes with gorm.Model CreatedAt + NodeExecutionCreatedAt *time.Time + // Corresponds to the UpdatedAt field in the NodeExecution closure + // Prefixed with NodeExecution to avoid clashes with gorm.Model UpdatedAt + NodeExecutionUpdatedAt *time.Time + Duration time.Duration + // The task execution (if any) which launched this node execution. + // TO BE DEPRECATED - as we have now introduced ParentID + ParentTaskExecutionID uint `sql:"default:null" gorm:"index"` + // The workflow execution (if any) which this node execution launched + LaunchedExecution models.Execution `gorm:"foreignKey:ParentNodeExecutionID;references:ID"` + // In the case of dynamic workflow nodes, the remote closure is uploaded to the path specified here. + DynamicWorkflowRemoteClosureReference string + // Metadata that is only relevant to the flyteadmin service that is used to parse the model and track additional attributes. + InternalData []byte + NodeExecutionMetadata []byte + // Parent that spawned this node execution - value is empty for executions at level 0 + ParentID *uint `sql:"default:null" gorm:"index"` + // List of child node executions - for cases like Dynamic task, sub workflow, etc + ChildNodeExecutions []NodeExecution `gorm:"foreignKey:ParentID;references:ID"` + // Execution Error Kind. nullable, can be one of core.ExecutionError_ErrorKind + ErrorKind *string `gorm:"index"` + // Execution Error Code nullable. 
string value, but finite set determined by the execution engine and plugins + ErrorCode *string + // If the node is of Type Task, this should always exist for a successful execution, indicating the cache status for the execution + CacheStatus *string + } + + return tx.AutoMigrate(&NodeExecution{}) + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, + + { + ID: "pg-noop-2023-03-31-noop-execution-event", + Migrate: func(tx *gorm.DB) error { + type ExecutionKey struct { + Project string `gorm:"primary_key;column:execution_project" valid:"length(0|127)"` + Domain string `gorm:"primary_key;column:execution_domain" valid:"length(0|127)"` + Name string `gorm:"primary_key;column:execution_name" valid:"length(0|127)"` + } + type ExecutionEvent struct { + ID uint `gorm:"index;autoIncrement;not null"` + CreatedAt time.Time `gorm:"type:time"` + UpdatedAt time.Time `gorm:"type:time"` + DeletedAt *time.Time `gorm:"index"` + ExecutionKey + RequestID string `valid:"length(0|255)"` + OccurredAt time.Time + Phase string `gorm:"primary_key"` + } + + return tx.AutoMigrate(&ExecutionEvent{}) + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, + + { + ID: "pg-noop-2023-03-31-noop-node-execution-event", + Migrate: func(tx *gorm.DB) error { + type ExecutionKey struct { + Project string `gorm:"primary_key;column:execution_project" valid:"length(0|127)"` + Domain string `gorm:"primary_key;column:execution_domain" valid:"length(0|127)"` + Name string `gorm:"primary_key;column:execution_name" valid:"length(0|127)"` + } + type NodeExecutionKey struct { + ExecutionKey + NodeID string `gorm:"primary_key;index" valid:"length(0|180)"` + } + type NodeExecutionEvent struct { + ID uint `gorm:"index;autoIncrement;not null"` + CreatedAt time.Time `gorm:"type:time"` + UpdatedAt time.Time `gorm:"type:time"` + DeletedAt *time.Time `gorm:"index"` + NodeExecutionKey + RequestID string + OccurredAt time.Time + Phase string `gorm:"primary_key"` + } + + return tx.AutoMigrate(&NodeExecutionEvent{}) + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, + + { + ID: "pg-noop-2023-03-31-noop-description-entity-2", + Migrate: func(tx *gorm.DB) error { + type DescriptionEntityKey struct { + ResourceType core.ResourceType `gorm:"primary_key;index:description_entity_project_domain_name_version_idx" valid:"length(0|255)"` + Project string `gorm:"primary_key;index:description_entity_project_domain_name_version_idx" valid:"length(0|255)"` + Domain string `gorm:"primary_key;index:description_entity_project_domain_name_version_idx" valid:"length(0|255)"` + Name string `gorm:"primary_key;index:description_entity_project_domain_name_version_idx" valid:"length(0|255)"` + Version string `gorm:"primary_key;index:description_entity_project_domain_name_version_idx" valid:"length(0|255)"` + } + + // SourceCode Database model to encapsulate a SourceCode. + type SourceCode struct { + Link string `valid:"length(0|255)"` + } + + // DescriptionEntity Database model to encapsulate a DescriptionEntity. 
+ type DescriptionEntity struct { + DescriptionEntityKey + ID uint `gorm:"index;autoIncrement;not null"` + CreatedAt time.Time `gorm:"type:time"` + UpdatedAt time.Time `gorm:"type:time"` + DeletedAt *time.Time `gorm:"index"` + SourceCode + ShortDescription string + LongDescription []byte + } + + return tx.AutoMigrate(&DescriptionEntity{}) + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, + + { + ID: "pg-noop-2023-03-31-noop-signal", + Migrate: func(tx *gorm.DB) error { + type SignalKey struct { + ExecutionKey + SignalID string `gorm:"primary_key;index" valid:"length(0|255)"` + } + + type Signal struct { + ID uint `gorm:"index;autoIncrement;not null"` + CreatedAt time.Time `gorm:"type:time"` + UpdatedAt time.Time `gorm:"type:time"` + DeletedAt *time.Time `gorm:"index"` + SignalKey + Type []byte `gorm:"not null"` + Value []byte + } + + return tx.AutoMigrate(&Signal{}) + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, + + { + ID: "pg-noop-2023-03-31-noop-resource", + Migrate: func(tx *gorm.DB) error { + type ResourcePriority int32 + + // In this model, the combination of (Project, Domain, Workflow, LaunchPlan, ResourceType) is unique + type Resource struct { + ID int64 `gorm:"AUTO_INCREMENT;column:id;primary_key;not null"` + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt *time.Time `sql:"index"` + Project string `gorm:"uniqueIndex:resource_idx" valid:"length(0|255)"` + Domain string `gorm:"uniqueIndex:resource_idx" valid:"length(0|255)"` + Workflow string `gorm:"uniqueIndex:resource_idx" valid:"length(0|255)"` + LaunchPlan string `gorm:"uniqueIndex:resource_idx" valid:"length(0|255)"` + ResourceType string `gorm:"uniqueIndex:resource_idx" valid:"length(0|255)"` + Priority ResourcePriority + // Serialized flyteidl.admin.MatchingAttributes. + Attributes []byte + } + + return tx.AutoMigrate(&Resource{}) + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, + + { + ID: "pg-noop-2023-03-31-noop-schedulable_entities", + Migrate: func(tx *gorm.DB) error { + type SchedulableEntityKey struct { + Project string `gorm:"primary_key"` + Domain string `gorm:"primary_key"` + Name string `gorm:"primary_key"` + Version string `gorm:"primary_key"` + } + type SchedulableEntity struct { + ID uint `gorm:"index;autoIncrement;not null"` + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt *time.Time `gorm:"index"` + SchedulableEntityKey + CronExpression string + FixedRateValue uint32 + Unit admin.FixedRateUnit + KickoffTimeInputArg string + Active *bool + } + + return tx.AutoMigrate(&SchedulableEntity{}) + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, + + { + ID: "pg-noop-2023-03-31-noop-schedulable_entities-snapshot", + Migrate: func(tx *gorm.DB) error { + type ScheduleEntitiesSnapshot struct { + ID uint `gorm:"index;autoIncrement;not null"` + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt *time.Time `gorm:"index"` + Snapshot []byte `gorm:"column:snapshot" schema:"-"` + } + + return tx.AutoMigrate(&ScheduleEntitiesSnapshot{}) + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, +} + +var Migrations = append(LegacyMigrations, NoopMigrations...) 
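+
+// A minimal sketch of how the combined Migrations list might be consumed at
+// startup (assuming the github.com/go-gormigrate/gormigrate/v2 package and an
+// already-open *gorm.DB named db; the surrounding wiring is not shown here):
+//
+//	m := gormigrate.New(db, gormigrate.DefaultOptions, Migrations)
+//	if err := m.Migrate(); err != nil {
+//		return err
+//	}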
+ func alterTableColumnType(db *sql.DB, columnName, columnType string) error { var err error diff --git a/pkg/repositories/database.go b/pkg/repositories/database.go index 5676ca883..fdc663ac6 100644 --- a/pkg/repositories/database.go +++ b/pkg/repositories/database.go @@ -19,6 +19,7 @@ import ( ) const pqInvalidDBCode = "3D000" +const pqDbAlreadyExistsCode = "42P04" const defaultDB = "postgres" // Resolves a password value from either a user-provided inline value or a filepath whose contents contain a password. @@ -115,7 +116,7 @@ func createPostgresDbIfNotExists(ctx context.Context, gormConfig *gorm.Config, p return gormDb, nil } - if !isInvalidDBPgError(err) { + if !isPgErrorWithCode(err, pqInvalidDBCode) { return nil, err } @@ -139,21 +140,24 @@ func createPostgresDbIfNotExists(ctx context.Context, gormConfig *gorm.Config, p result := gormDb.Exec(createDBStatement) if result.Error != nil { - return nil, result.Error + if !isPgErrorWithCode(result.Error, pqDbAlreadyExistsCode) { + return nil, result.Error + } + logger.Warningf(ctx, "Got DB already exists error for [%s], skipping...", pgConfig.DbName) } // Now try connecting to the db again return gorm.Open(dialector, gormConfig) } -func isInvalidDBPgError(err error) bool { +func isPgErrorWithCode(err error, code string) bool { pgErr := &pgconn.PgError{} if !errors.As(err, &pgErr) { // err chain does not contain a pgconn.PgError return false } - // pgconn.PgError found in chain and set to pgErr - return pgErr.Code == pqInvalidDBCode + // pgconn.PgError found in chain and set to code specified + return pgErr.Code == code } func setupDbConnectionPool(ctx context.Context, gormDb *gorm.DB, dbConfig *database.DbConfig) error { diff --git a/pkg/repositories/database_test.go b/pkg/repositories/database_test.go index 0c81349f8..4dea5585e 100644 --- a/pkg/repositories/database_test.go +++ b/pkg/repositories/database_test.go @@ -122,7 +122,7 @@ func TestIsInvalidDBPgError(t *testing.T) { tc := tc t.Run(tc.Name, func(t *testing.T) { - assert.Equal(t, tc.ExpectedResult, isInvalidDBPgError(tc.Err)) + assert.Equal(t, tc.ExpectedResult, isPgErrorWithCode(tc.Err, pqInvalidDBCode)) }) } } @@ -196,3 +196,41 @@ func TestGetDB(t *testing.T) { assert.Equal(t, "sqlite", db.Name()) }) } + +func TestIsPgDbAlreadyExistsError(t *testing.T) { + // wrap error with wrappedError when testing to ensure the function checks the whole error chain + + testCases := []struct { + Name string + Err error + ExpectedResult bool + }{ + { + Name: "nil error", + Err: nil, + ExpectedResult: false, + }, + { + Name: "not a PgError", + Err: &wrappedError{err: &net.OpError{Op: "connect", Err: errors.New("connection refused")}}, + ExpectedResult: false, + }, + { + Name: "PgError but not already exists", + Err: &wrappedError{&pgconn.PgError{Severity: "FATAL", Message: "out of memory", Code: "53200"}}, + ExpectedResult: false, + }, + { + Name: "PgError and is already exists", + Err: &wrappedError{&pgconn.PgError{Severity: "FATAL", Message: "database \"flyte\" does not exist", Code: "42P04"}}, + ExpectedResult: true, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.Name, func(t *testing.T) { + assert.Equal(t, tc.ExpectedResult, isPgErrorWithCode(tc.Err, pqDbAlreadyExistsCode)) + }) + } +} diff --git a/pkg/repositories/gorm_repo.go b/pkg/repositories/gorm_repo.go index a23852446..3d80de611 100644 --- a/pkg/repositories/gorm_repo.go +++ b/pkg/repositories/gorm_repo.go @@ -23,8 +23,10 @@ type GormRepo struct { taskExecutionRepo interfaces.TaskExecutionRepoInterface 
workflowRepo interfaces.WorkflowRepoInterface resourceRepo interfaces.ResourceRepoInterface + descriptionEntityRepo interfaces.DescriptionEntityRepoInterface schedulableEntityRepo schedulerInterfaces.SchedulableEntityRepoInterface scheduleEntitiesSnapshotRepo schedulerInterfaces.ScheduleEntitiesSnapShotRepoInterface + signalRepo interfaces.SignalRepoInterface } func (r *GormRepo) ExecutionRepo() interfaces.ExecutionRepoInterface { @@ -71,6 +73,10 @@ func (r *GormRepo) ResourceRepo() interfaces.ResourceRepoInterface { return r.resourceRepo } +func (r *GormRepo) DescriptionEntityRepo() interfaces.DescriptionEntityRepoInterface { + return r.descriptionEntityRepo +} + func (r *GormRepo) SchedulableEntityRepo() schedulerInterfaces.SchedulableEntityRepoInterface { return r.schedulableEntityRepo } @@ -79,6 +85,10 @@ func (r *GormRepo) ScheduleEntitiesSnapshotRepo() schedulerInterfaces.ScheduleEn return r.scheduleEntitiesSnapshotRepo } +func (r *GormRepo) SignalRepo() interfaces.SignalRepoInterface { + return r.signalRepo +} + func (r *GormRepo) GetGormDB() *gorm.DB { return r.db } @@ -97,7 +107,9 @@ func NewGormRepo(db *gorm.DB, errorTransformer errors.ErrorTransformer, scope pr taskExecutionRepo: gormimpl.NewTaskExecutionRepo(db, errorTransformer, scope.NewSubScope("task_executions")), workflowRepo: gormimpl.NewWorkflowRepo(db, errorTransformer, scope.NewSubScope("workflows")), resourceRepo: gormimpl.NewResourceRepo(db, errorTransformer, scope.NewSubScope("resources")), + descriptionEntityRepo: gormimpl.NewDescriptionEntityRepo(db, errorTransformer, scope.NewSubScope("description_entities")), schedulableEntityRepo: schedulerGormImpl.NewSchedulableEntityRepo(db, errorTransformer, scope.NewSubScope("schedulable_entity")), scheduleEntitiesSnapshotRepo: schedulerGormImpl.NewScheduleEntitiesSnapshotRepo(db, errorTransformer, scope.NewSubScope("schedule_entities_snapshot")), + signalRepo: gormimpl.NewSignalRepo(db, errorTransformer, scope.NewSubScope("signals")), } } diff --git a/pkg/repositories/gormimpl/common.go b/pkg/repositories/gormimpl/common.go index 6a9d09703..69dada2f0 100644 --- a/pkg/repositories/gormimpl/common.go +++ b/pkg/repositories/gormimpl/common.go @@ -15,6 +15,7 @@ import ( const Project = "project" const Domain = "domain" const Name = "name" +const Version = "version" const Description = "description" const ResourceType = "resource_type" const State = "state" @@ -25,6 +26,8 @@ const namedEntityMetadataTableName = "named_entity_metadata" const nodeExecutionTableName = "node_executions" const taskExecutionTableName = "task_executions" const taskTableName = "tasks" +const workflowTableName = "workflows" +const descriptionEntityTableName = "description_entities" const limit = "limit" const filters = "filters" @@ -41,6 +44,7 @@ var entityToTableName = map[common.Entity]string{ common.Workflow: "workflows", common.NamedEntity: "entities", common.NamedEntityMetadata: "named_entity_metadata", + common.Signal: "signals", } var innerJoinExecToNodeExec = fmt.Sprintf( diff --git a/pkg/repositories/gormimpl/description_entity_repo.go b/pkg/repositories/gormimpl/description_entity_repo.go new file mode 100644 index 000000000..1f5dceb5a --- /dev/null +++ b/pkg/repositories/gormimpl/description_entity_repo.go @@ -0,0 +1,115 @@ +package gormimpl + +import ( + "context" + + "github.com/flyteorg/flyteadmin/pkg/common" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytestdlib/promutils" + + flyteAdminDbErrors "github.com/flyteorg/flyteadmin/pkg/repositories/errors" 
+ "github.com/flyteorg/flyteadmin/pkg/repositories/interfaces" + "github.com/flyteorg/flyteadmin/pkg/repositories/models" + "gorm.io/gorm" +) + +// DescriptionEntityRepo Implementation of DescriptionEntityRepoInterface. +type DescriptionEntityRepo struct { + db *gorm.DB + errorTransformer flyteAdminDbErrors.ErrorTransformer + metrics gormMetrics +} + +func (r *DescriptionEntityRepo) Get(ctx context.Context, input interfaces.GetDescriptionEntityInput) (models.DescriptionEntity, error) { + var descriptionEntity models.DescriptionEntity + + filters, err := getDescriptionEntityFilters(input.ResourceType, input.Project, input.Domain, input.Name, input.Version) + if err != nil { + return models.DescriptionEntity{}, err + } + + tx := r.db.Table(descriptionEntityTableName) + // Apply filters + tx, err = applyFilters(tx, filters, nil) + if err != nil { + return models.DescriptionEntity{}, err + } + + timer := r.metrics.GetDuration.Start() + tx = tx.Take(&descriptionEntity) + timer.Stop() + + if tx.Error != nil { + return models.DescriptionEntity{}, r.errorTransformer.ToFlyteAdminError(tx.Error) + } + + return descriptionEntity, nil +} + +func (r *DescriptionEntityRepo) List( + ctx context.Context, input interfaces.ListResourceInput) (interfaces.DescriptionEntityCollectionOutput, error) { + // First validate input. + if err := ValidateListInput(input); err != nil { + return interfaces.DescriptionEntityCollectionOutput{}, err + } + var descriptionEntities []models.DescriptionEntity + tx := r.db.Limit(input.Limit).Offset(input.Offset) + + // Apply filters + tx, err := applyFilters(tx, input.InlineFilters, input.MapFilters) + if err != nil { + return interfaces.DescriptionEntityCollectionOutput{}, err + } + // Apply sort ordering. + if input.SortParameter != nil { + tx = tx.Order(input.SortParameter.GetGormOrderExpr()) + } + timer := r.metrics.ListDuration.Start() + tx.Find(&descriptionEntities) + timer.Stop() + if tx.Error != nil { + return interfaces.DescriptionEntityCollectionOutput{}, r.errorTransformer.ToFlyteAdminError(tx.Error) + } + return interfaces.DescriptionEntityCollectionOutput{ + Entities: descriptionEntities, + }, nil +} + +func getDescriptionEntityFilters(resourceType core.ResourceType, project string, domain string, name string, version string) ([]common.InlineFilter, error) { + entity := common.ResourceTypeToEntity[resourceType] + + filters := make([]common.InlineFilter, 0) + projectFilter, err := common.NewSingleValueFilter(entity, common.Equal, Project, project) + if err != nil { + return nil, err + } + filters = append(filters, projectFilter) + domainFilter, err := common.NewSingleValueFilter(entity, common.Equal, Domain, domain) + if err != nil { + return nil, err + } + filters = append(filters, domainFilter) + nameFilter, err := common.NewSingleValueFilter(entity, common.Equal, Name, name) + if err != nil { + return nil, err + } + filters = append(filters, nameFilter) + versionFilter, err := common.NewSingleValueFilter(entity, common.Equal, Version, version) + if err != nil { + return nil, err + } + filters = append(filters, versionFilter) + + return filters, nil +} + +// NewDescriptionEntityRepo Returns an instance of DescriptionRepoInterface +func NewDescriptionEntityRepo( + db *gorm.DB, errorTransformer flyteAdminDbErrors.ErrorTransformer, scope promutils.Scope) interfaces.DescriptionEntityRepoInterface { + metrics := newMetrics(scope) + return &DescriptionEntityRepo{ + db: db, + errorTransformer: errorTransformer, + metrics: metrics, + } +} diff --git 
a/pkg/repositories/gormimpl/description_entity_repo_test.go b/pkg/repositories/gormimpl/description_entity_repo_test.go new file mode 100644 index 000000000..9ae447417 --- /dev/null +++ b/pkg/repositories/gormimpl/description_entity_repo_test.go @@ -0,0 +1,125 @@ +package gormimpl + +import ( + "context" + "testing" + + "github.com/flyteorg/flyteadmin/pkg/common" + "github.com/flyteorg/flyteadmin/pkg/repositories/interfaces" + + mocket "github.com/Selvatico/go-mocket" + "github.com/flyteorg/flyteadmin/pkg/repositories/errors" + mockScope "github.com/flyteorg/flytestdlib/promutils" + "github.com/stretchr/testify/assert" +) + +const shortDescription = "hello" + +func TestGetDescriptionEntity(t *testing.T) { + descriptionEntityRepo := NewDescriptionEntityRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope()) + + descriptionEntities := make([]map[string]interface{}, 0) + descriptionEntity := getMockDescriptionEntityResponseFromDb(version, []byte{1, 2}) + descriptionEntities = append(descriptionEntities, descriptionEntity) + + output, err := descriptionEntityRepo.Get(context.Background(), interfaces.GetDescriptionEntityInput{ + ResourceType: resourceType, + Project: project, + Domain: domain, + Name: name, + Version: version, + }) + assert.Empty(t, output) + assert.EqualError(t, err, "Test transformer failed to find transformation to apply") + + GlobalMock := mocket.Catcher.Reset() + GlobalMock.Logging = true + // Only match on queries that append expected filters + GlobalMock.NewMock().WithQuery(`SELECT * FROM "description_entities" WHERE project = $1 AND domain = $2 AND name = $3 AND version = $4 LIMIT 1`). + WithReply(descriptionEntities) + output, err = descriptionEntityRepo.Get(context.Background(), interfaces.GetDescriptionEntityInput{ + ResourceType: resourceType, + Project: project, + Domain: domain, + Name: name, + Version: version, + }) + assert.Empty(t, err) + assert.Equal(t, project, output.Project) + assert.Equal(t, domain, output.Domain) + assert.Equal(t, name, output.Name) + assert.Equal(t, version, output.Version) + assert.Equal(t, shortDescription, output.ShortDescription) +} + +func TestListDescriptionEntities(t *testing.T) { + descriptionEntityRepo := NewDescriptionEntityRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope()) + + descriptionEntities := make([]map[string]interface{}, 0) + versions := []string{"ABC", "XYZ"} + for _, version := range versions { + descriptionEntity := getMockDescriptionEntityResponseFromDb(version, []byte{1, 2}) + descriptionEntities = append(descriptionEntities, descriptionEntity) + } + + GlobalMock := mocket.Catcher.Reset() + GlobalMock.NewMock().WithReply(descriptionEntities) + + collection, err := descriptionEntityRepo.List(context.Background(), interfaces.ListResourceInput{}) + assert.Equal(t, 0, len(collection.Entities)) + assert.Error(t, err) + + collection, err = descriptionEntityRepo.List(context.Background(), interfaces.ListResourceInput{ + InlineFilters: []common.InlineFilter{ + getEqualityFilter(common.Workflow, "project", project), + getEqualityFilter(common.Workflow, "domain", domain), + getEqualityFilter(common.Workflow, "name", name), + }, + Limit: 20, + }) + assert.Empty(t, err) + assert.NotEmpty(t, collection) + assert.NotEmpty(t, collection.Entities) + assert.Len(t, collection.Entities, 2) + for _, descriptionEntity := range collection.Entities { + assert.Equal(t, project, descriptionEntity.Project) + assert.Equal(t, domain, descriptionEntity.Domain) + assert.Equal(t, name, 
descriptionEntity.Name) + assert.Contains(t, versions, descriptionEntity.Version) + assert.Equal(t, shortDescription, descriptionEntity.ShortDescription) + } +} + +func getMockDescriptionEntityResponseFromDb(version string, digest []byte) map[string]interface{} { + descriptionEntity := make(map[string]interface{}) + descriptionEntity["resource_type"] = resourceType + descriptionEntity["project"] = project + descriptionEntity["domain"] = domain + descriptionEntity["name"] = name + descriptionEntity["version"] = version + descriptionEntity["Digest"] = digest + descriptionEntity["ShortDescription"] = shortDescription + return descriptionEntity +} + +func TestGetDescriptionEntityFilters(t *testing.T) { + filters, err := getDescriptionEntityFilters(resourceType, project, domain, name, version) + entity := common.ResourceTypeToEntity[resourceType] + assert.NoError(t, err) + + projectFilter, err := common.NewSingleValueFilter(entity, common.Equal, Project, project) + assert.NoError(t, err) + assert.Equal(t, filters[0], projectFilter) + + domainFilter, err := common.NewSingleValueFilter(entity, common.Equal, Domain, domain) + assert.NoError(t, err) + assert.Equal(t, filters[1], domainFilter) + + nameFilter, err := common.NewSingleValueFilter(entity, common.Equal, Name, name) + assert.NoError(t, err) + assert.Equal(t, filters[2], nameFilter) + + versionFilter, err := common.NewSingleValueFilter(entity, common.Equal, Version, version) + assert.NoError(t, err) + assert.Equal(t, filters[3], versionFilter) +} diff --git a/pkg/repositories/gormimpl/execution_repo_test.go b/pkg/repositories/gormimpl/execution_repo_test.go index 9a3dc194c..daf73f6e5 100644 --- a/pkg/repositories/gormimpl/execution_repo_test.go +++ b/pkg/repositories/gormimpl/execution_repo_test.go @@ -96,6 +96,7 @@ func getMockExecutionResponseFromDb(expected models.Execution) map[string]interf execution["execution_updated_at"] = expected.ExecutionUpdatedAt execution["duration"] = expected.Duration execution["mode"] = expected.Mode + execution["launch_entity"] = expected.LaunchEntity return execution } @@ -118,6 +119,7 @@ func TestGetExecution(t *testing.T) { StartedAt: &executionStartedAt, ExecutionCreatedAt: &createdAt, ExecutionUpdatedAt: &executionUpdatedAt, + LaunchEntity: "task", } executions := make([]map[string]interface{}, 0) @@ -213,7 +215,7 @@ func TestListExecutions_Filters(t *testing.T) { GlobalMock := mocket.Catcher.Reset() // Only match on queries that append the name filter - GlobalMock.NewMock().WithQuery(`SELECT * FROM "executions" WHERE executions.execution_project = $1 AND executions.execution_domain = $2 AND executions.execution_name = $3 AND (executions.workflow_id = $4) LIMIT 20`).WithReply(executions[0:1]) + GlobalMock.NewMock().WithQuery(`SELECT * FROM "executions" WHERE executions.execution_project = $1 AND executions.execution_domain = $2 AND executions.execution_name = $3 AND executions.workflow_id = $4 LIMIT 20`).WithReply(executions[0:1]) collection, err := executionRepo.List(context.Background(), interfaces.ListResourceInput{ InlineFilters: []common.InlineFilter{ @@ -303,6 +305,7 @@ func TestListExecutionsForWorkflow(t *testing.T) { Spec: []byte{3, 4}, StartedAt: &executionStartedAt, Duration: time.Hour, + LaunchEntity: "launch_plan", }) executions = append(executions, execution) @@ -310,7 +313,7 @@ func TestListExecutionsForWorkflow(t *testing.T) { GlobalMock.Logging = true // Only match on queries that append expected filters - GlobalMock.NewMock().WithQuery(`SELECT 
"executions"."id","executions"."created_at","executions"."updated_at","executions"."deleted_at","executions"."execution_project","executions"."execution_domain","executions"."execution_name","executions"."launch_plan_id","executions"."workflow_id","executions"."task_id","executions"."phase","executions"."closure","executions"."spec","executions"."started_at","executions"."execution_created_at","executions"."execution_updated_at","executions"."duration","executions"."abort_cause","executions"."mode","executions"."source_execution_id","executions"."parent_node_execution_id","executions"."cluster","executions"."inputs_uri","executions"."user_inputs_uri","executions"."error_kind","executions"."error_code","executions"."user","executions"."state" FROM "executions" INNER JOIN workflows ON executions.workflow_id = workflows.id INNER JOIN tasks ON executions.task_id = tasks.id WHERE executions.execution_project = $1 AND executions.execution_domain = $2 AND executions.execution_name = $3 AND (workflows.name = $4) AND tasks.name = $5 LIMIT 20`).WithReply(executions) + GlobalMock.NewMock().WithQuery(`SELECT "executions"."id","executions"."created_at","executions"."updated_at","executions"."deleted_at","executions"."execution_project","executions"."execution_domain","executions"."execution_name","executions"."launch_plan_id","executions"."workflow_id","executions"."task_id","executions"."phase","executions"."closure","executions"."spec","executions"."started_at","executions"."execution_created_at","executions"."execution_updated_at","executions"."duration","executions"."abort_cause","executions"."mode","executions"."source_execution_id","executions"."parent_node_execution_id","executions"."cluster","executions"."inputs_uri","executions"."user_inputs_uri","executions"."error_kind","executions"."error_code","executions"."user","executions"."state","executions"."launch_entity" FROM "executions" INNER JOIN workflows ON executions.workflow_id = workflows.id INNER JOIN tasks ON executions.task_id = tasks.id WHERE executions.execution_project = $1 AND executions.execution_domain = $2 AND executions.execution_name = $3 AND workflows.name = $4 AND tasks.name = $5 LIMIT 20`).WithReply(executions) collection, err := executionRepo.List(context.Background(), interfaces.ListResourceInput{ InlineFilters: []common.InlineFilter{ @@ -341,6 +344,7 @@ func TestListExecutionsForWorkflow(t *testing.T) { assert.Equal(t, []byte{3, 4}, execution.Spec) assert.Equal(t, executionStartedAt, *execution.StartedAt) assert.Equal(t, time.Hour, execution.Duration) + assert.Equal(t, "launch_plan", execution.LaunchEntity) } } diff --git a/pkg/repositories/gormimpl/launch_plan_repo_test.go b/pkg/repositories/gormimpl/launch_plan_repo_test.go index 0dd0b6126..f96bd7964 100644 --- a/pkg/repositories/gormimpl/launch_plan_repo_test.go +++ b/pkg/repositories/gormimpl/launch_plan_repo_test.go @@ -403,8 +403,8 @@ func TestListLaunchPlansForWorkflow(t *testing.T) { // HACK: gorm orders the filters on join clauses non-deterministically. Ordering of filters doesn't affect // correctness, but because the mocket library only pattern matches on substrings, both variations of the (valid) // SQL that gorm produces are checked below. 
- query := `SELECT "launch_plans"."id","launch_plans"."created_at","launch_plans"."updated_at","launch_plans"."deleted_at","launch_plans"."project","launch_plans"."domain","launch_plans"."name","launch_plans"."version","launch_plans"."spec","launch_plans"."workflow_id","launch_plans"."closure","launch_plans"."state","launch_plans"."digest","launch_plans"."schedule_type" FROM "launch_plans" inner join workflows on launch_plans.workflow_id = workflows.id WHERE launch_plans.project = $1 AND launch_plans.domain = $2 AND launch_plans.name = $3 AND (workflows.deleted_at = $4) LIMIT 20` - alternateQuery := `SELECT "launch_plans"."id","launch_plans"."created_at","launch_plans"."updated_at","launch_plans"."deleted_at","launch_plans"."project","launch_plans"."domain","launch_plans"."name","launch_plans"."version","launch_plans"."spec","launch_plans"."workflow_id","launch_plans"."closure","launch_plans"."state","launch_plans"."digest","launch_plans"."schedule_type" FROM "launch_plans" inner join workflows on launch_plans.workflow_id = workflows.id WHERE launch_plans.project = $1 AND launch_plans.domain = $2 AND launch_plans.name = $3 AND (workflows.deleted_at = $4) LIMIT 20` + query := `SELECT "launch_plans"."id","launch_plans"."created_at","launch_plans"."updated_at","launch_plans"."deleted_at","launch_plans"."project","launch_plans"."domain","launch_plans"."name","launch_plans"."version","launch_plans"."spec","launch_plans"."workflow_id","launch_plans"."closure","launch_plans"."state","launch_plans"."digest","launch_plans"."schedule_type" FROM "launch_plans" inner join workflows on launch_plans.workflow_id = workflows.id WHERE launch_plans.project = $1 AND launch_plans.domain = $2 AND launch_plans.name = $3 AND workflows.deleted_at = $4 LIMIT 20` + alternateQuery := `SELECT "launch_plans"."id","launch_plans"."created_at","launch_plans"."updated_at","launch_plans"."deleted_at","launch_plans"."project","launch_plans"."domain","launch_plans"."name","launch_plans"."version","launch_plans"."spec","launch_plans"."workflow_id","launch_plans"."closure","launch_plans"."state","launch_plans"."digest","launch_plans"."schedule_type" FROM "launch_plans" inner join workflows on launch_plans.workflow_id = workflows.id WHERE launch_plans.project = $1 AND launch_plans.domain = $2 AND launch_plans.name = $3 AND workflows.deleted_at = $4 LIMIT 20` GlobalMock.NewMock().WithQuery(query).WithReply(launchPlans) GlobalMock.NewMock().WithQuery(alternateQuery).WithReply(launchPlans) diff --git a/pkg/repositories/gormimpl/named_entity_repo_test.go b/pkg/repositories/gormimpl/named_entity_repo_test.go index d1867eca0..d586a2c8f 100644 --- a/pkg/repositories/gormimpl/named_entity_repo_test.go +++ b/pkg/repositories/gormimpl/named_entity_repo_test.go @@ -47,7 +47,7 @@ func TestGetNamedEntity(t *testing.T) { GlobalMock := mocket.Catcher.Reset() GlobalMock.Logging = true GlobalMock.NewMock().WithQuery( - `SELECT workflows.project,workflows.domain,workflows.name,'2' AS resource_type,named_entity_metadata.description,named_entity_metadata.state FROM "workflows" LEFT JOIN named_entity_metadata ON named_entity_metadata.resource_type = 2 AND named_entity_metadata.project = workflows.project AND named_entity_metadata.domain = workflows.domain AND named_entity_metadata.name = workflows.name WHERE (workflows.project = $1) AND (workflows.domain = $2) AND (workflows.name = $3) LIMIT 1`).WithReply(results) + `SELECT workflows.project,workflows.domain,workflows.name,'2' AS 
resource_type,named_entity_metadata.description,named_entity_metadata.state FROM "workflows" LEFT JOIN named_entity_metadata ON named_entity_metadata.resource_type = 2 AND named_entity_metadata.project = workflows.project AND named_entity_metadata.domain = workflows.domain AND named_entity_metadata.name = workflows.name WHERE workflows.project = $1 AND workflows.domain = $2 AND workflows.name = $3 LIMIT 1`).WithReply(results) output, err := metadataRepo.Get(context.Background(), interfaces.GetNamedEntityInput{ ResourceType: resourceType, Project: project, diff --git a/pkg/repositories/gormimpl/project_repo_test.go b/pkg/repositories/gormimpl/project_repo_test.go index 5fe7b2f73..145072133 100644 --- a/pkg/repositories/gormimpl/project_repo_test.go +++ b/pkg/repositories/gormimpl/project_repo_test.go @@ -26,7 +26,7 @@ func TestCreateProject(t *testing.T) { query := GlobalMock.NewMock() GlobalMock.Logging = true query.WithQuery( - `INSERT INTO "projects" ("created_at","updated_at","deleted_at","identifier","name","description","labels","state") VALUES ($1,$2,$3,$4,$5,$6,$7,$8)`) + `INSERT INTO "projects" ("created_at","updated_at","deleted_at","name","description","labels","state","identifier") VALUES ($1,$2,$3,$4,$5,$6,$7,$8)`) activeState := int32(admin.Project_ACTIVE) err := projectRepo.Create(context.Background(), models.Project{ diff --git a/pkg/repositories/gormimpl/signal_repo.go b/pkg/repositories/gormimpl/signal_repo.go new file mode 100644 index 000000000..b87f70316 --- /dev/null +++ b/pkg/repositories/gormimpl/signal_repo.go @@ -0,0 +1,110 @@ +package gormimpl + +import ( + "context" + "errors" + + adminerrors "github.com/flyteorg/flyteadmin/pkg/errors" + flyteAdminDbErrors "github.com/flyteorg/flyteadmin/pkg/repositories/errors" + "github.com/flyteorg/flyteadmin/pkg/repositories/interfaces" + "github.com/flyteorg/flyteadmin/pkg/repositories/models" + + "github.com/flyteorg/flytestdlib/promutils" + + "google.golang.org/grpc/codes" + + "gorm.io/gorm" +) + +// SignalRepo is an implementation of SignalRepoInterface. +type SignalRepo struct { + db *gorm.DB + errorTransformer flyteAdminDbErrors.ErrorTransformer + metrics gormMetrics +} + +// Get retrieves a signal model from the database store. +func (s *SignalRepo) Get(ctx context.Context, input models.SignalKey) (models.Signal, error) { + var signal models.Signal + timer := s.metrics.GetDuration.Start() + tx := s.db.Where(&models.Signal{ + SignalKey: input, + }).Take(&signal) + timer.Stop() + if errors.Is(tx.Error, gorm.ErrRecordNotFound) { + return models.Signal{}, adminerrors.NewFlyteAdminError(codes.NotFound, "signal does not exist") + } + if tx.Error != nil { + return models.Signal{}, s.errorTransformer.ToFlyteAdminError(tx.Error) + } + return signal, nil +} + +// GetOrCreate returns a signal if it already exists, if not it creates a new one given the input +func (s *SignalRepo) GetOrCreate(ctx context.Context, input *models.Signal) error { + timer := s.metrics.CreateDuration.Start() + tx := s.db.FirstOrCreate(&input, input) + timer.Stop() + if tx.Error != nil { + return s.errorTransformer.ToFlyteAdminError(tx.Error) + } + return nil +} + +// List fetches all signals that match the provided input +func (s *SignalRepo) List(ctx context.Context, input interfaces.ListResourceInput) ([]models.Signal, error) { + // First validate input. 
+ if err := ValidateListInput(input); err != nil { + return nil, err + } + var signals []models.Signal + tx := s.db.Limit(input.Limit).Offset(input.Offset) + + // Apply filters + tx, err := applyFilters(tx, input.InlineFilters, input.MapFilters) + if err != nil { + return nil, err + } + // Apply sort ordering. + if input.SortParameter != nil { + tx = tx.Order(input.SortParameter.GetGormOrderExpr()) + } + timer := s.metrics.ListDuration.Start() + tx.Find(&signals) + timer.Stop() + if tx.Error != nil { + return nil, s.errorTransformer.ToFlyteAdminError(tx.Error) + } + + return signals, nil +} + +// Update sets the value field on the specified signal model +func (s *SignalRepo) Update(ctx context.Context, input models.SignalKey, value []byte) error { + signal := models.Signal{ + SignalKey: input, + Value: value, + } + + timer := s.metrics.GetDuration.Start() + tx := s.db.Model(&signal).Select("value").Updates(signal) + timer.Stop() + if tx.Error != nil { + return s.errorTransformer.ToFlyteAdminError(tx.Error) + } + if tx.RowsAffected == 0 { + return adminerrors.NewFlyteAdminError(codes.NotFound, "signal does not exist") + } + return nil +} + +// Returns an instance of SignalRepoInterface +func NewSignalRepo( + db *gorm.DB, errorTransformer flyteAdminDbErrors.ErrorTransformer, scope promutils.Scope) interfaces.SignalRepoInterface { + metrics := newMetrics(scope) + return &SignalRepo{ + db: db, + errorTransformer: errorTransformer, + metrics: metrics, + } +} diff --git a/pkg/repositories/gormimpl/signal_repo_test.go b/pkg/repositories/gormimpl/signal_repo_test.go new file mode 100644 index 000000000..374b60df8 --- /dev/null +++ b/pkg/repositories/gormimpl/signal_repo_test.go @@ -0,0 +1,185 @@ +package gormimpl + +import ( + "context" + "reflect" + "testing" + "time" + + "github.com/flyteorg/flyteadmin/pkg/common" + "github.com/flyteorg/flyteadmin/pkg/repositories/errors" + "github.com/flyteorg/flyteadmin/pkg/repositories/interfaces" + "github.com/flyteorg/flyteadmin/pkg/repositories/models" + + mockScope "github.com/flyteorg/flytestdlib/promutils" + + mocket "github.com/Selvatico/go-mocket" + + "github.com/stretchr/testify/assert" +) + +var ( + signalModel = &models.Signal{ + BaseModel: models.BaseModel{ + ID: 10, + CreatedAt: time.Time{}, + UpdatedAt: time.Time{}, + DeletedAt: nil, + }, + SignalKey: models.SignalKey{ + ExecutionKey: models.ExecutionKey{ + Project: "project", + Domain: "domain", + Name: "name", + }, + SignalID: "signal", + }, + Type: []byte{1, 2}, + Value: []byte{3, 4}, + } +) + +func toSignalMap(signalModel models.Signal) map[string]interface{} { + signal := make(map[string]interface{}) + signal["id"] = signalModel.ID + signal["created_at"] = signalModel.CreatedAt + signal["updated_at"] = signalModel.UpdatedAt + signal["execution_project"] = signalModel.Project + signal["execution_domain"] = signalModel.Domain + signal["execution_name"] = signalModel.Name + signal["signal_id"] = signalModel.SignalID + if signalModel.Type != nil { + signal["type"] = signalModel.Type + } + if signalModel.Value != nil { + signal["value"] = signalModel.Value + } + + return signal +} + +func TestGetSignal(t *testing.T) { + ctx := context.Background() + + signalRepo := NewSignalRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope()) + + GlobalMock := mocket.Catcher.Reset() + GlobalMock.Logging = true + + mockSelectQuery := GlobalMock.NewMock() + mockSelectQuery.WithQuery( + `SELECT * FROM "signals" WHERE "signals"."execution_project" = $1 AND "signals"."execution_domain" = 
$2 AND "signals"."execution_name" = $3 AND "signals"."signal_id" = $4 LIMIT 1`) + + // retrieve non-existent signalModel + lookupSignalModel, err := signalRepo.Get(ctx, signalModel.SignalKey) + assert.Error(t, err) + assert.Empty(t, lookupSignalModel) + + assert.True(t, mockSelectQuery.Triggered) + mockSelectQuery.Triggered = false // reset to false for second call + + // retrieve existent signalModel + signalModels := []map[string]interface{}{toSignalMap(*signalModel)} + mockSelectQuery.WithReply(signalModels) + + lookupSignalModel, err = signalRepo.Get(ctx, signalModel.SignalKey) + assert.NoError(t, err) + assert.True(t, reflect.DeepEqual(*signalModel, lookupSignalModel)) + + assert.True(t, mockSelectQuery.Triggered) +} + +func TestGetOrCreateSignal(t *testing.T) { + ctx := context.Background() + + signalRepo := NewSignalRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope()) + + GlobalMock := mocket.Catcher.Reset() + GlobalMock.Logging = true + + // create initial signalModel + mockInsertQuery := GlobalMock.NewMock() + mockInsertQuery.WithQuery( + `INSERT INTO "signals" ("created_at","updated_at","deleted_at","execution_project","execution_domain","execution_name","signal_id","type","value","id") VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10)`) + + err := signalRepo.GetOrCreate(ctx, signalModel) + assert.NoError(t, err) + + assert.True(t, mockInsertQuery.Triggered) + mockInsertQuery.Triggered = false // reset to false for second call + + // initialize query mocks + signalModels := []map[string]interface{}{toSignalMap(*signalModel)} + mockSelectQuery := GlobalMock.NewMock() + mockSelectQuery.WithQuery( + `SELECT * FROM "signals" WHERE "signals"."id" = $1 AND "signals"."created_at" = $2 AND "signals"."updated_at" = $3 AND "signals"."execution_project" = $4 AND "signals"."execution_domain" = $5 AND "signals"."execution_name" = $6 AND "signals"."signal_id" = $7 AND "signals"."execution_project" = $8 AND "signals"."execution_domain" = $9 AND "signals"."execution_name" = $10 AND "signals"."signal_id" = $11 ORDER BY "signals"."id" LIMIT 1`).WithReply(signalModels) + + // retrieve existing signalModel + lookupSignalModel := &models.Signal{} + *lookupSignalModel = *signalModel + lookupSignalModel.Type = nil + lookupSignalModel.Value = nil + + err = signalRepo.GetOrCreate(ctx, lookupSignalModel) + assert.NoError(t, err) + assert.True(t, reflect.DeepEqual(signalModel, lookupSignalModel)) + + assert.True(t, mockSelectQuery.Triggered) + assert.False(t, mockInsertQuery.Triggered) +} + +func TestListSignals(t *testing.T) { + ctx := context.Background() + + signalRepo := NewSignalRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope()) + + GlobalMock := mocket.Catcher.Reset() + GlobalMock.Logging = true + + // read all signal models + signalModels := []map[string]interface{}{toSignalMap(*signalModel)} + mockSelectQuery := GlobalMock.NewMock() + mockSelectQuery.WithQuery( + `SELECT * FROM "signals" WHERE project = $1 AND domain = $2 AND name = $3 LIMIT 20`).WithReply(signalModels) + + signals, err := signalRepo.List(ctx, interfaces.ListResourceInput{ + InlineFilters: []common.InlineFilter{ + getEqualityFilter(common.Signal, "project", project), + getEqualityFilter(common.Signal, "domain", domain), + getEqualityFilter(common.Signal, "name", name), + }, + Limit: 20, + }) + assert.NoError(t, err) + + assert.True(t, reflect.DeepEqual([]models.Signal{*signalModel}, signals)) + assert.True(t, mockSelectQuery.Triggered) +} + +func TestUpdateSignal(t *testing.T) { + 
ctx := context.Background()
+
+	signalRepo := NewSignalRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope())
+
+	GlobalMock := mocket.Catcher.Reset()
+	GlobalMock.Logging = true
+
+	// update a signalModel that does not exist
+	mockUpdateQuery := GlobalMock.NewMock()
+	mockUpdateQuery.WithQuery(
+		`UPDATE "signals" SET "updated_at"=$1,"value"=$2 WHERE "execution_project" = $3 AND "execution_domain" = $4 AND "execution_name" = $5 AND "signal_id" = $6`).WithRowsNum(0)
+
+	err := signalRepo.Update(ctx, signalModel.SignalKey, signalModel.Value)
+	assert.Error(t, err)
+
+	assert.True(t, mockUpdateQuery.Triggered)
+	mockUpdateQuery.Triggered = false // reset to false for second call
+
+	// update a signalModel that exists
+	mockUpdateQuery.WithRowsNum(1)
+
+	err = signalRepo.Update(ctx, signalModel.SignalKey, signalModel.Value)
+	assert.NoError(t, err)
+
+	assert.True(t, mockUpdateQuery.Triggered)
+}
diff --git a/pkg/repositories/gormimpl/task_repo.go b/pkg/repositories/gormimpl/task_repo.go
index f48c6ca11..fae18c0db 100644
--- a/pkg/repositories/gormimpl/task_repo.go
+++ b/pkg/repositories/gormimpl/task_repo.go
@@ -21,14 +21,30 @@ type TaskRepo struct {
 	metrics gormMetrics
 }
 
-func (r *TaskRepo) Create(ctx context.Context, input models.Task) error {
+func (r *TaskRepo) Create(_ context.Context, input models.Task, descriptionEntity *models.DescriptionEntity) error {
 	timer := r.metrics.CreateDuration.Start()
-	tx := r.db.Omit("id").Create(&input)
+	err := r.db.Transaction(func(_ *gorm.DB) error {
+		if descriptionEntity == nil {
+			tx := r.db.Omit("id").Create(&input)
+			if tx.Error != nil {
+				return r.errorTransformer.ToFlyteAdminError(tx.Error)
+			}
+			return nil
+		}
+		tx := r.db.Omit("id").Create(descriptionEntity)
+		if tx.Error != nil {
+			return r.errorTransformer.ToFlyteAdminError(tx.Error)
+		}
+
+		tx = r.db.Omit("id").Create(&input)
+		if tx.Error != nil {
+			return r.errorTransformer.ToFlyteAdminError(tx.Error)
+		}
+
+		return nil
+	})
 	timer.Stop()
-	if tx.Error != nil {
-		return r.errorTransformer.ToFlyteAdminError(tx.Error)
-	}
-	return nil
+	return err
 }
 
 func (r *TaskRepo) Get(ctx context.Context, input interfaces.Identifier) (models.Task, error) {
@@ -51,6 +67,7 @@ func (r *TaskRepo) Get(ctx context.Context, input interfaces.Identifier) (models
 			Version: input.Version,
 		})
 	}
+
 	if tx.Error != nil {
 		return models.Task{}, r.errorTransformer.ToFlyteAdminError(tx.Error)
 	}
@@ -65,7 +82,6 @@ func (r *TaskRepo) List(
 	}
 	var tasks []models.Task
 	tx := r.db.Limit(input.Limit).Offset(input.Offset)
-
 	// Apply filters
 	tx, err := applyFilters(tx, input.InlineFilters, input.MapFilters)
 	if err != nil {
diff --git a/pkg/repositories/gormimpl/task_repo_test.go b/pkg/repositories/gormimpl/task_repo_test.go
index 9b4cb91f4..678a5c382 100644
--- a/pkg/repositories/gormimpl/task_repo_test.go
+++ b/pkg/repositories/gormimpl/task_repo_test.go
@@ -29,7 +29,7 @@ func TestCreateTask(t *testing.T) {
 		},
 		Closure: []byte{1, 2},
 		Type: pythonTestTaskType,
-	})
+	}, &models.DescriptionEntity{ShortDescription: "hello"})
 	assert.NoError(t, err)
 }
diff --git a/pkg/repositories/gormimpl/workflow_repo.go b/pkg/repositories/gormimpl/workflow_repo.go
index 2c78cb2c4..69b711dab 100644
--- a/pkg/repositories/gormimpl/workflow_repo.go
+++ b/pkg/repositories/gormimpl/workflow_repo.go
@@ -12,8 +12,6 @@ import (
 	"gorm.io/gorm"
 )
 
-const workflowTableName = "workflows"
-
 // Implementation of WorkflowRepoInterface.
type WorkflowRepo struct { db *gorm.DB @@ -21,14 +19,24 @@ type WorkflowRepo struct { metrics gormMetrics } -func (r *WorkflowRepo) Create(ctx context.Context, input models.Workflow) error { +func (r *WorkflowRepo) Create(_ context.Context, input models.Workflow, descriptionEntity *models.DescriptionEntity) error { timer := r.metrics.CreateDuration.Start() - tx := r.db.Omit("id").Create(&input) + err := r.db.Transaction(func(_ *gorm.DB) error { + if descriptionEntity != nil { + tx := r.db.Omit("id").Create(descriptionEntity) + if tx.Error != nil { + return r.errorTransformer.ToFlyteAdminError(tx.Error) + } + } + tx := r.db.Omit("id").Create(&input) + if tx.Error != nil { + return r.errorTransformer.ToFlyteAdminError(tx.Error) + } + + return nil + }) timer.Stop() - if tx.Error != nil { - return r.errorTransformer.ToFlyteAdminError(tx.Error) - } - return nil + return err } func (r *WorkflowRepo) Get(ctx context.Context, input interfaces.Identifier) (models.Workflow, error) { diff --git a/pkg/repositories/gormimpl/workflow_repo_test.go b/pkg/repositories/gormimpl/workflow_repo_test.go index feac746b8..ee300d609 100644 --- a/pkg/repositories/gormimpl/workflow_repo_test.go +++ b/pkg/repositories/gormimpl/workflow_repo_test.go @@ -29,7 +29,7 @@ func TestCreateWorkflow(t *testing.T) { }, TypedInterface: typedInterface, RemoteClosureIdentifier: remoteSpecIdentifier, - }) + }, &models.DescriptionEntity{ShortDescription: "hello"}) assert.NoError(t, err) } diff --git a/pkg/repositories/interfaces/description_entity.go b/pkg/repositories/interfaces/description_entity.go new file mode 100644 index 000000000..eeba05a3b --- /dev/null +++ b/pkg/repositories/interfaces/description_entity.go @@ -0,0 +1,28 @@ +package interfaces + +import ( + "context" + + "github.com/flyteorg/flyteadmin/pkg/repositories/models" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" +) + +type GetDescriptionEntityInput struct { + ResourceType core.ResourceType + Project string + Domain string + Name string + Version string +} + +type DescriptionEntityCollectionOutput struct { + Entities []models.DescriptionEntity +} + +// DescriptionEntityRepoInterface Defines the interface for interacting with Description models. +type DescriptionEntityRepoInterface interface { + // Get Returns a matching DescriptionEntity if it exists. + Get(ctx context.Context, input GetDescriptionEntityInput) (models.DescriptionEntity, error) + // List Returns DescriptionEntity matching query parameters. 
A limit must be provided for the results page size + List(ctx context.Context, input ListResourceInput) (DescriptionEntityCollectionOutput, error) +} diff --git a/pkg/repositories/interfaces/repository.go b/pkg/repositories/interfaces/repository.go index 3dcaffc93..904de1e48 100644 --- a/pkg/repositories/interfaces/repository.go +++ b/pkg/repositories/interfaces/repository.go @@ -20,8 +20,10 @@ type Repository interface { NodeExecutionEventRepo() NodeExecutionEventRepoInterface TaskExecutionRepo() TaskExecutionRepoInterface NamedEntityRepo() NamedEntityRepoInterface + DescriptionEntityRepo() DescriptionEntityRepoInterface SchedulableEntityRepo() schedulerInterfaces.SchedulableEntityRepoInterface ScheduleEntitiesSnapshotRepo() schedulerInterfaces.ScheduleEntitiesSnapShotRepoInterface + SignalRepo() SignalRepoInterface GetGormDB() *gorm.DB } diff --git a/pkg/repositories/interfaces/signal_repo.go b/pkg/repositories/interfaces/signal_repo.go new file mode 100644 index 000000000..f26ca065c --- /dev/null +++ b/pkg/repositories/interfaces/signal_repo.go @@ -0,0 +1,26 @@ +package interfaces + +import ( + "context" + + "github.com/flyteorg/flyteadmin/pkg/repositories/models" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" +) + +//go:generate mockery -name=SignalRepoInterface -output=../mocks -case=underscore + +// Defines the interface for interacting with signal models. +type SignalRepoInterface interface { + // Get retrieves a signal model from the database store. + Get(ctx context.Context, input models.SignalKey) (models.Signal, error) + // GetOrCreate inserts a signal model into the database store or returns one if it already exists. + GetOrCreate(ctx context.Context, input *models.Signal) error + // List all signals that match the input values. + List(ctx context.Context, input ListResourceInput) ([]models.Signal, error) + // Update sets the value on a signal in the database store. + Update(ctx context.Context, input models.SignalKey, value []byte) error +} + +type GetSignalInput struct { + SignalID core.SignalIdentifier +} diff --git a/pkg/repositories/interfaces/task_repo.go b/pkg/repositories/interfaces/task_repo.go index f4d35377e..443200c82 100644 --- a/pkg/repositories/interfaces/task_repo.go +++ b/pkg/repositories/interfaces/task_repo.go @@ -9,7 +9,7 @@ import ( // Defines the interface for interacting with Task models. type TaskRepoInterface interface { // Inserts a task model into the database store. - Create(ctx context.Context, input models.Task) error + Create(ctx context.Context, input models.Task, descriptionEntity *models.DescriptionEntity) error // Returns a matching task if it exists. Get(ctx context.Context, input Identifier) (models.Task, error) // Returns task revisions matching query parameters. A limit must be provided for the results page size. diff --git a/pkg/repositories/interfaces/workflow_repo.go b/pkg/repositories/interfaces/workflow_repo.go index 55206124a..5e957db95 100644 --- a/pkg/repositories/interfaces/workflow_repo.go +++ b/pkg/repositories/interfaces/workflow_repo.go @@ -9,7 +9,7 @@ import ( // Defines the interface for interacting with Workflow models. type WorkflowRepoInterface interface { // Inserts a workflow model into the database store. - Create(ctx context.Context, input models.Workflow) error + Create(ctx context.Context, input models.Workflow, descriptionEntity *models.DescriptionEntity) error // Returns a matching workflow if it exists. 
Get(ctx context.Context, input Identifier) (models.Workflow, error)
 	// Returns workflow revisions matching query parameters. A limit must be provided for the results page size.
diff --git a/pkg/repositories/mocks/description_entity_repo.go b/pkg/repositories/mocks/description_entity_repo.go
new file mode 100644
index 000000000..0e4fec93a
--- /dev/null
+++ b/pkg/repositories/mocks/description_entity_repo.go
@@ -0,0 +1,66 @@
+// Mock implementation of a description entity repo to be used for tests.
+package mocks
+
+import (
+	"context"
+
+	"github.com/flyteorg/flyteadmin/pkg/repositories/interfaces"
+	"github.com/flyteorg/flyteadmin/pkg/repositories/models"
+)
+
+type CreateDescriptionEntityFunc func(input models.DescriptionEntity) error
+type GetDescriptionEntityFunc func(input interfaces.GetDescriptionEntityInput) (models.DescriptionEntity, error)
+type ListDescriptionEntityFunc func(input interfaces.ListResourceInput) (interfaces.DescriptionEntityCollectionOutput, error)
+
+type MockDescriptionEntityRepo struct {
+	createFunction CreateDescriptionEntityFunc
+	getFunction GetDescriptionEntityFunc
+	listFunction ListDescriptionEntityFunc
+}
+
+func (r *MockDescriptionEntityRepo) Create(ctx context.Context, DescriptionEntity models.DescriptionEntity) (uint, error) {
+	if r.createFunction != nil {
+		return 1, r.createFunction(DescriptionEntity)
+	}
+	return 1, nil
+}
+
+func (r *MockDescriptionEntityRepo) Get(
+	ctx context.Context, input interfaces.GetDescriptionEntityInput) (models.DescriptionEntity, error) {
+	if r.getFunction != nil {
+		return r.getFunction(input)
+	}
+	return models.DescriptionEntity{
+		DescriptionEntityKey: models.DescriptionEntityKey{
+			ResourceType: input.ResourceType,
+			Project: input.Project,
+			Domain: input.Domain,
+			Name: input.Name,
+			Version: input.Version,
+		},
+		ShortDescription: "hello world",
+	}, nil
+}
+
+func (r *MockDescriptionEntityRepo) List(ctx context.Context, input interfaces.ListResourceInput) (interfaces.DescriptionEntityCollectionOutput, error) {
+	if r.listFunction != nil {
+		return r.listFunction(input)
+	}
+	return interfaces.DescriptionEntityCollectionOutput{}, nil
+}
+
+func (r *MockDescriptionEntityRepo) SetCreateCallback(createFunction CreateDescriptionEntityFunc) {
+	r.createFunction = createFunction
+}
+
+func (r *MockDescriptionEntityRepo) SetGetCallback(getFunction GetDescriptionEntityFunc) {
+	r.getFunction = getFunction
+}
+
+func (r *MockDescriptionEntityRepo) SetListCallback(listFunction ListDescriptionEntityFunc) {
+	r.listFunction = listFunction
+}
+
+func NewMockDescriptionEntityRepo() interfaces.DescriptionEntityRepoInterface {
+	return &MockDescriptionEntityRepo{}
+}
diff --git a/pkg/repositories/mocks/repository.go b/pkg/repositories/mocks/repository.go
index 27fcc17f7..5899d82e2 100644
--- a/pkg/repositories/mocks/repository.go
+++ b/pkg/repositories/mocks/repository.go
@@ -19,8 +19,10 @@ type MockRepository struct {
 	resourceRepo interfaces.ResourceRepoInterface
 	taskExecutionRepo interfaces.TaskExecutionRepoInterface
 	namedEntityRepo interfaces.NamedEntityRepoInterface
+	descriptionEntityRepo interfaces.DescriptionEntityRepoInterface
 	schedulableEntityRepo sIface.SchedulableEntityRepoInterface
 	schedulableEntitySnapshotRepo sIface.ScheduleEntitiesSnapShotRepoInterface
+	signalRepo interfaces.SignalRepoInterface
 }
 
 func (r *MockRepository) GetGormDB() *gorm.DB {
@@ -79,6 +81,14 @@ func (r *MockRepository) NamedEntityRepo() interfaces.NamedEntityRepoInterface {
 	return r.namedEntityRepo
 }
 
+func (r *MockRepository) DescriptionEntityRepo() 
interfaces.DescriptionEntityRepoInterface { + return r.descriptionEntityRepo +} + +func (r *MockRepository) SignalRepo() interfaces.SignalRepoInterface { + return r.signalRepo +} + func NewMockRepository() interfaces.Repository { return &MockRepository{ taskRepo: NewMockTaskRepo(), @@ -90,9 +100,11 @@ func NewMockRepository() interfaces.Repository { resourceRepo: NewMockResourceRepo(), taskExecutionRepo: NewMockTaskExecutionRepo(), namedEntityRepo: NewMockNamedEntityRepo(), + descriptionEntityRepo: NewMockDescriptionEntityRepo(), ExecutionEventRepoIface: &ExecutionEventRepoInterface{}, NodeExecutionEventRepoIface: &NodeExecutionEventRepoInterface{}, schedulableEntityRepo: &sMocks.SchedulableEntityRepoInterface{}, schedulableEntitySnapshotRepo: &sMocks.ScheduleEntitiesSnapShotRepoInterface{}, + signalRepo: &SignalRepoInterface{}, } } diff --git a/pkg/repositories/mocks/signal_repo_interface.go b/pkg/repositories/mocks/signal_repo_interface.go new file mode 100644 index 000000000..f60307911 --- /dev/null +++ b/pkg/repositories/mocks/signal_repo_interface.go @@ -0,0 +1,161 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + interfaces "github.com/flyteorg/flyteadmin/pkg/repositories/interfaces" + mock "github.com/stretchr/testify/mock" + + models "github.com/flyteorg/flyteadmin/pkg/repositories/models" +) + +// SignalRepoInterface is an autogenerated mock type for the SignalRepoInterface type +type SignalRepoInterface struct { + mock.Mock +} + +type SignalRepoInterface_Get struct { + *mock.Call +} + +func (_m SignalRepoInterface_Get) Return(_a0 models.Signal, _a1 error) *SignalRepoInterface_Get { + return &SignalRepoInterface_Get{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *SignalRepoInterface) OnGet(ctx context.Context, input models.SignalKey) *SignalRepoInterface_Get { + c_call := _m.On("Get", ctx, input) + return &SignalRepoInterface_Get{Call: c_call} +} + +func (_m *SignalRepoInterface) OnGetMatch(matchers ...interface{}) *SignalRepoInterface_Get { + c_call := _m.On("Get", matchers...) + return &SignalRepoInterface_Get{Call: c_call} +} + +// Get provides a mock function with given fields: ctx, input +func (_m *SignalRepoInterface) Get(ctx context.Context, input models.SignalKey) (models.Signal, error) { + ret := _m.Called(ctx, input) + + var r0 models.Signal + if rf, ok := ret.Get(0).(func(context.Context, models.SignalKey) models.Signal); ok { + r0 = rf(ctx, input) + } else { + r0 = ret.Get(0).(models.Signal) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, models.SignalKey) error); ok { + r1 = rf(ctx, input) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type SignalRepoInterface_GetOrCreate struct { + *mock.Call +} + +func (_m SignalRepoInterface_GetOrCreate) Return(_a0 error) *SignalRepoInterface_GetOrCreate { + return &SignalRepoInterface_GetOrCreate{Call: _m.Call.Return(_a0)} +} + +func (_m *SignalRepoInterface) OnGetOrCreate(ctx context.Context, input *models.Signal) *SignalRepoInterface_GetOrCreate { + c_call := _m.On("GetOrCreate", ctx, input) + return &SignalRepoInterface_GetOrCreate{Call: c_call} +} + +func (_m *SignalRepoInterface) OnGetOrCreateMatch(matchers ...interface{}) *SignalRepoInterface_GetOrCreate { + c_call := _m.On("GetOrCreate", matchers...) 
+ return &SignalRepoInterface_GetOrCreate{Call: c_call} +} + +// GetOrCreate provides a mock function with given fields: ctx, input +func (_m *SignalRepoInterface) GetOrCreate(ctx context.Context, input *models.Signal) error { + ret := _m.Called(ctx, input) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *models.Signal) error); ok { + r0 = rf(ctx, input) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type SignalRepoInterface_List struct { + *mock.Call +} + +func (_m SignalRepoInterface_List) Return(_a0 []models.Signal, _a1 error) *SignalRepoInterface_List { + return &SignalRepoInterface_List{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *SignalRepoInterface) OnList(ctx context.Context, input interfaces.ListResourceInput) *SignalRepoInterface_List { + c_call := _m.On("List", ctx, input) + return &SignalRepoInterface_List{Call: c_call} +} + +func (_m *SignalRepoInterface) OnListMatch(matchers ...interface{}) *SignalRepoInterface_List { + c_call := _m.On("List", matchers...) + return &SignalRepoInterface_List{Call: c_call} +} + +// List provides a mock function with given fields: ctx, input +func (_m *SignalRepoInterface) List(ctx context.Context, input interfaces.ListResourceInput) ([]models.Signal, error) { + ret := _m.Called(ctx, input) + + var r0 []models.Signal + if rf, ok := ret.Get(0).(func(context.Context, interfaces.ListResourceInput) []models.Signal); ok { + r0 = rf(ctx, input) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]models.Signal) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, interfaces.ListResourceInput) error); ok { + r1 = rf(ctx, input) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type SignalRepoInterface_Update struct { + *mock.Call +} + +func (_m SignalRepoInterface_Update) Return(_a0 error) *SignalRepoInterface_Update { + return &SignalRepoInterface_Update{Call: _m.Call.Return(_a0)} +} + +func (_m *SignalRepoInterface) OnUpdate(ctx context.Context, input models.SignalKey, value []byte) *SignalRepoInterface_Update { + c_call := _m.On("Update", ctx, input, value) + return &SignalRepoInterface_Update{Call: c_call} +} + +func (_m *SignalRepoInterface) OnUpdateMatch(matchers ...interface{}) *SignalRepoInterface_Update { + c_call := _m.On("Update", matchers...) 
+ return &SignalRepoInterface_Update{Call: c_call} +} + +// Update provides a mock function with given fields: ctx, input, value +func (_m *SignalRepoInterface) Update(ctx context.Context, input models.SignalKey, value []byte) error { + ret := _m.Called(ctx, input, value) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, models.SignalKey, []byte) error); ok { + r0 = rf(ctx, input, value) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/pkg/repositories/mocks/task_repo.go b/pkg/repositories/mocks/task_repo.go index 5bbefedb9..c4d76fe99 100644 --- a/pkg/repositories/mocks/task_repo.go +++ b/pkg/repositories/mocks/task_repo.go @@ -8,7 +8,7 @@ import ( "github.com/flyteorg/flyteadmin/pkg/repositories/models" ) -type CreateTaskFunc func(input models.Task) error +type CreateTaskFunc func(input models.Task, descriptionEntity *models.DescriptionEntity) error type GetTaskFunc func(input interfaces.Identifier) (models.Task, error) type ListTaskFunc func(input interfaces.ListResourceInput) (interfaces.TaskCollectionOutput, error) type ListTaskIdentifiersFunc func(input interfaces.ListResourceInput) (interfaces.TaskCollectionOutput, error) @@ -20,9 +20,9 @@ type MockTaskRepo struct { listUniqueTaskIdsFunction ListTaskIdentifiersFunc } -func (r *MockTaskRepo) Create(ctx context.Context, input models.Task) error { +func (r *MockTaskRepo) Create(ctx context.Context, input models.Task, descriptionEntity *models.DescriptionEntity) error { if r.createFunction != nil { - return r.createFunction(input) + return r.createFunction(input, descriptionEntity) } return nil } diff --git a/pkg/repositories/mocks/workflow_repo.go b/pkg/repositories/mocks/workflow_repo.go index d029f52a3..5c9e8796a 100644 --- a/pkg/repositories/mocks/workflow_repo.go +++ b/pkg/repositories/mocks/workflow_repo.go @@ -8,7 +8,7 @@ import ( "github.com/flyteorg/flyteadmin/pkg/repositories/models" ) -type CreateWorkflowFunc func(input models.Workflow) error +type CreateWorkflowFunc func(input models.Workflow, descriptionEntity *models.DescriptionEntity) error type GetWorkflowFunc func(input interfaces.Identifier) (models.Workflow, error) type ListWorkflowFunc func(input interfaces.ListResourceInput) (interfaces.WorkflowCollectionOutput, error) type ListIdentifiersFunc func(input interfaces.ListResourceInput) (interfaces.WorkflowCollectionOutput, error) @@ -20,9 +20,9 @@ type MockWorkflowRepo struct { listIdentifiersFunc ListIdentifiersFunc } -func (r *MockWorkflowRepo) Create(ctx context.Context, input models.Workflow) error { +func (r *MockWorkflowRepo) Create(ctx context.Context, input models.Workflow, descriptionEntity *models.DescriptionEntity) error { if r.createFunction != nil { - return r.createFunction(input) + return r.createFunction(input, descriptionEntity) } return nil } diff --git a/pkg/repositories/models/description_entity.go b/pkg/repositories/models/description_entity.go new file mode 100644 index 000000000..3a5c71625 --- /dev/null +++ b/pkg/repositories/models/description_entity.go @@ -0,0 +1,30 @@ +package models + +import "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + +// DescriptionEntityKey DescriptionEntity primary key +type DescriptionEntityKey struct { + ResourceType core.ResourceType `gorm:"primary_key;index:description_entity_project_domain_name_version_idx" valid:"length(0|255)"` + Project string `gorm:"primary_key;index:description_entity_project_domain_name_version_idx" valid:"length(0|255)"` + Domain string 
`gorm:"primary_key;index:description_entity_project_domain_name_version_idx" valid:"length(0|255)"` + Name string `gorm:"primary_key;index:description_entity_project_domain_name_version_idx" valid:"length(0|255)"` + Version string `gorm:"primary_key;index:description_entity_project_domain_name_version_idx" valid:"length(0|255)"` +} + +// SourceCode Database model to encapsulate a SourceCode. +type SourceCode struct { + Link string `valid:"length(0|255)"` +} + +// DescriptionEntity Database model to encapsulate a DescriptionEntity. +type DescriptionEntity struct { + DescriptionEntityKey + + BaseModel + + ShortDescription string + + LongDescription []byte + + SourceCode +} diff --git a/pkg/repositories/models/execution.go b/pkg/repositories/models/execution.go index 08c248a64..1c5e1300d 100644 --- a/pkg/repositories/models/execution.go +++ b/pkg/repositories/models/execution.go @@ -58,4 +58,6 @@ type Execution struct { User string `gorm:"index" valid:"length(0|255)"` // GORM doesn't save the zero value for ints, so we use a pointer for the State field State *int32 `gorm:"index;default:0"` + // The resource type of the entity used to launch the execution, one of 'launch_plan' or 'task' + LaunchEntity string } diff --git a/pkg/repositories/models/signal.go b/pkg/repositories/models/signal.go new file mode 100644 index 000000000..8a7fac693 --- /dev/null +++ b/pkg/repositories/models/signal.go @@ -0,0 +1,15 @@ +package models + +// Signal primary key +type SignalKey struct { + ExecutionKey + SignalID string `gorm:"primary_key;index" valid:"length(0|255)"` +} + +// Database model to encapsulate a signal. +type Signal struct { + BaseModel + SignalKey + Type []byte `gorm:"not null"` + Value []byte +} diff --git a/pkg/repositories/models/task.go b/pkg/repositories/models/task.go index 093bd29ec..53ee1c8b8 100644 --- a/pkg/repositories/models/task.go +++ b/pkg/repositories/models/task.go @@ -20,4 +20,6 @@ type Task struct { Digest []byte // Task type (also stored in the closure put promoted as a column for filtering). Type string `valid:"length(0|255)"` + // ShortDescription for the task. + ShortDescription string } diff --git a/pkg/repositories/models/workflow.go b/pkg/repositories/models/workflow.go index 5f50379b1..f431c47b3 100644 --- a/pkg/repositories/models/workflow.go +++ b/pkg/repositories/models/workflow.go @@ -16,4 +16,6 @@ type Workflow struct { RemoteClosureIdentifier string `gorm:"not null" valid:"length(0|255)"` // Hash of the compiled workflow closure Digest []byte + // ShortDescription for the workflow. + ShortDescription string } diff --git a/pkg/repositories/transformers/constants.go b/pkg/repositories/transformers/constants.go index 8b7c80750..e7794b6a6 100644 --- a/pkg/repositories/transformers/constants.go +++ b/pkg/repositories/transformers/constants.go @@ -1,5 +1,8 @@ package transformers +// InputsObjectSuffix is used when execution event data includes inline events and admin offloads the data. +const InputsObjectSuffix = "offloaded_inputs" + // OutputsObjectSuffix is used when execution event data includes inline outputs but the admin deployment is configured // to offload such data. The generated file path for the offloaded data will include the execution identifier and this suffix. 
const OutputsObjectSuffix = "offloaded_outputs" diff --git a/pkg/repositories/transformers/description_entity.go b/pkg/repositories/transformers/description_entity.go new file mode 100644 index 000000000..e57aa1ba1 --- /dev/null +++ b/pkg/repositories/transformers/description_entity.go @@ -0,0 +1,87 @@ +package transformers + +import ( + "context" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/flyteorg/flyteadmin/pkg/errors" + "github.com/flyteorg/flyteadmin/pkg/repositories/models" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytestdlib/logger" + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/codes" +) + +// CreateDescriptionEntityModel transforms an admin DescriptionEntity and its Identifier into a DescriptionEntity database model +func CreateDescriptionEntityModel( + descriptionEntity *admin.DescriptionEntity, + id core.Identifier) (*models.DescriptionEntity, error) { + ctx := context.Background() + if descriptionEntity == nil { + return nil, nil + } + + var longDescriptionBytes []byte + var sourceCode models.SourceCode + var err error + + if descriptionEntity.LongDescription != nil { + longDescriptionBytes, err = proto.Marshal(descriptionEntity.LongDescription) + if err != nil { + logger.Errorf(ctx, "Failed to marshal LongDescription with error: %v", err) + return nil, err + } + } + + if descriptionEntity.SourceCode != nil { + sourceCode = models.SourceCode{Link: descriptionEntity.SourceCode.Link} + } + + return &models.DescriptionEntity{ + DescriptionEntityKey: models.DescriptionEntityKey{ + ResourceType: id.ResourceType, + Project: id.Project, + Domain: id.Domain, + Name: id.Name, + Version: id.Version, + }, + ShortDescription: descriptionEntity.ShortDescription, + LongDescription: longDescriptionBytes, + SourceCode: sourceCode, + }, nil +} + +func FromDescriptionEntityModel(descriptionEntityModel models.DescriptionEntity) (*admin.DescriptionEntity, error) { + + longDescription := admin.Description{} + err := proto.Unmarshal(descriptionEntityModel.LongDescription, &longDescription) + if err != nil { + return nil, errors.NewFlyteAdminError(codes.Internal, "failed to unmarshal longDescription") + } + + return &admin.DescriptionEntity{ + Id: &core.Identifier{ + ResourceType: descriptionEntityModel.ResourceType, + Project: descriptionEntityModel.Project, + Domain: descriptionEntityModel.Domain, + Name: descriptionEntityModel.Name, + Version: descriptionEntityModel.Version, + }, + ShortDescription: descriptionEntityModel.ShortDescription, + LongDescription: &longDescription, + SourceCode: &admin.SourceCode{Link: descriptionEntityModel.Link}, + }, nil +} + +func FromDescriptionEntityModels(descriptionEntityModels []models.DescriptionEntity) ([]*admin.DescriptionEntity, error) { + descriptionEntities := make([]*admin.DescriptionEntity, len(descriptionEntityModels)) + for idx, descriptionEntityModel := range descriptionEntityModels { + descriptionEntity, err := FromDescriptionEntityModel(descriptionEntityModel) + if err != nil { + return nil, err + } + descriptionEntities[idx] = descriptionEntity + } + return descriptionEntities, nil +} diff --git a/pkg/repositories/transformers/description_entity_test.go b/pkg/repositories/transformers/description_entity_test.go new file mode 100644 index
000000000..344134595 --- /dev/null +++ b/pkg/repositories/transformers/description_entity_test.go @@ -0,0 +1,90 @@ +package transformers + +import ( + "testing" + + "github.com/flyteorg/flyteadmin/pkg/repositories/models" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" +) + +const shortDescription = "hello" + +func TestToDescriptionEntityExecutionModel(t *testing.T) { + longDescription := &admin.Description{IconLink: "https://flyte"} + sourceCode := &admin.SourceCode{Link: "https://github/flyte"} + + longDescriptionBytes, err := proto.Marshal(longDescription) + assert.Nil(t, err) + + descriptionEntity := &admin.DescriptionEntity{ + ShortDescription: shortDescription, + LongDescription: longDescription, + SourceCode: sourceCode, + } + + id := core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: "project", + Domain: "domain", + Version: "xyz", + } + + model, err := CreateDescriptionEntityModel(descriptionEntity, id) + assert.Nil(t, err) + assert.Equal(t, shortDescription, model.ShortDescription) + assert.Equal(t, longDescriptionBytes, model.LongDescription) + assert.Equal(t, sourceCode.Link, model.Link) +} + +func TestFromDescriptionEntityExecutionModel(t *testing.T) { + longDescription := &admin.Description{IconLink: "https://flyte"} + sourceCode := &admin.SourceCode{Link: "https://github/flyte"} + + longDescriptionBytes, err := proto.Marshal(longDescription) + assert.Nil(t, err) + + descriptionEntity, err := FromDescriptionEntityModel(models.DescriptionEntity{ + DescriptionEntityKey: models.DescriptionEntityKey{ + Project: "project", + Domain: "domain", + Name: "name", + Version: "version", + }, + ShortDescription: shortDescription, + LongDescription: longDescriptionBytes, + SourceCode: models.SourceCode{Link: "https://github/flyte"}, + }) + assert.Nil(t, err) + assert.Equal(t, descriptionEntity.ShortDescription, shortDescription) + assert.Equal(t, descriptionEntity.LongDescription.IconLink, longDescription.IconLink) + assert.Equal(t, descriptionEntity.SourceCode, sourceCode) +} + +func TestFromDescriptionEntityExecutionModels(t *testing.T) { + longDescription := &admin.Description{IconLink: "https://flyte"} + sourceCode := &admin.SourceCode{Link: "https://github/flyte"} + + longDescriptionBytes, err := proto.Marshal(longDescription) + assert.Nil(t, err) + + descriptionEntity, err := FromDescriptionEntityModels([]models.DescriptionEntity{ + { + DescriptionEntityKey: models.DescriptionEntityKey{ + Project: "project", + Domain: "domain", + Name: "name", + Version: "version", + }, + ShortDescription: shortDescription, + LongDescription: longDescriptionBytes, + SourceCode: models.SourceCode{Link: "https://github/flyte"}, + }, + }) + assert.Nil(t, err) + assert.Equal(t, descriptionEntity[0].ShortDescription, shortDescription) + assert.Equal(t, descriptionEntity[0].LongDescription.IconLink, longDescription.IconLink) + assert.Equal(t, descriptionEntity[0].SourceCode, sourceCode) +} diff --git a/pkg/repositories/transformers/execution.go b/pkg/repositories/transformers/execution.go index c7b7edd21..e92fbc589 100644 --- a/pkg/repositories/transformers/execution.go +++ b/pkg/repositories/transformers/execution.go @@ -3,6 +3,7 @@ package transformers import ( "context" "fmt" + "strings" "time" "github.com/flyteorg/flyteadmin/pkg/common" @@ -21,6 +22,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) +const trimmedErrMessageLen = 100 + var 
clusterReassignablePhases = sets.NewString(core.WorkflowExecution_UNDEFINED.String(), core.WorkflowExecution_QUEUED.String()) // CreateExecutionModelInput encapsulates request parameters for calls to CreateExecutionModel. @@ -40,6 +43,16 @@ type CreateExecutionModelInput struct { InputsURI storage.DataReference UserInputsURI storage.DataReference SecurityContext *core.SecurityContext + LaunchEntity core.ResourceType +} + +type ExecutionTransformerOptions struct { + TrimErrorMessage bool +} + +var DefaultExecutionTransformerOptions = &ExecutionTransformerOptions{} +var ListExecutionTransformerOptions = &ExecutionTransformerOptions{ + TrimErrorMessage: true, } // CreateExecutionModel transforms a ExecutionCreateRequest to a Execution model @@ -102,6 +115,7 @@ func CreateExecutionModel(input CreateExecutionModelInput) (*models.Execution, e UserInputsURI: input.UserInputsURI, User: requestSpec.Metadata.Principal, State: &activeExecution, + LaunchEntity: strings.ToLower(input.LaunchEntity.String()), } // A reference launch entity can be one of either or a task OR launch plan. Traditionally, workflows are executed // with a reference launch plan which is why this behavior is the default below. @@ -302,7 +316,7 @@ func GetExecutionIdentifier(executionModel *models.Execution) core.WorkflowExecu } } -func FromExecutionModel(executionModel models.Execution) (*admin.Execution, error) { +func FromExecutionModel(executionModel models.Execution, opts *ExecutionTransformerOptions) (*admin.Execution, error) { var spec admin.ExecutionSpec var err error if err = proto.Unmarshal(executionModel.Spec, &spec); err != nil { @@ -312,6 +326,15 @@ func FromExecutionModel(executionModel models.Execution) (*admin.Execution, erro if err = proto.Unmarshal(executionModel.Closure, &closure); err != nil { return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal closure") } + if closure.GetError() != nil && opts != nil && opts.TrimErrorMessage && len(closure.GetError().Message) > 0 { + trimmedErrOutputResult := closure.GetError() + if len(trimmedErrOutputResult.Message) > trimmedErrMessageLen { + trimmedErrOutputResult.Message = trimmedErrOutputResult.Message[0:trimmedErrMessageLen] + } + closure.OutputResult = &admin.ExecutionClosure_Error{ + Error: trimmedErrOutputResult, + } + } if closure.StateChangeDetails == nil { // Update execution state details from model for older executions @@ -359,10 +382,10 @@ func PopulateDefaultStateChangeDetails(executionModel models.Execution) (*admin. 
}, nil } -func FromExecutionModels(executionModels []models.Execution) ([]*admin.Execution, error) { +func FromExecutionModels(executionModels []models.Execution, opts *ExecutionTransformerOptions) ([]*admin.Execution, error) { executions := make([]*admin.Execution, len(executionModels)) for idx, executionModel := range executionModels { - execution, err := FromExecutionModel(executionModel) + execution, err := FromExecutionModel(executionModel, opts) if err != nil { return nil, err } diff --git a/pkg/repositories/transformers/execution_test.go b/pkg/repositories/transformers/execution_test.go index 4755a4b23..fc42a82cd 100644 --- a/pkg/repositories/transformers/execution_test.go +++ b/pkg/repositories/transformers/execution_test.go @@ -88,6 +88,7 @@ func TestCreateExecutionModel(t *testing.T) { SourceExecutionID: sourceID, Cluster: cluster, SecurityContext: securityCtx, + LaunchEntity: core.ResourceType_LAUNCH_PLAN, }) assert.NoError(t, err) assert.Equal(t, "project", execution.Project) @@ -100,6 +101,7 @@ func TestCreateExecutionModel(t *testing.T) { assert.Equal(t, int32(admin.ExecutionMetadata_SYSTEM), execution.Mode) assert.Equal(t, nodeID, execution.ParentNodeExecutionID) assert.Equal(t, sourceID, execution.SourceExecutionID) + assert.Equal(t, "launch_plan", execution.LaunchEntity) expectedSpec := execRequest.Spec expectedSpec.Metadata.Principal = principal expectedSpec.Metadata.SystemMetadata = &admin.SystemMetadata{ @@ -526,7 +528,7 @@ func TestFromExecutionModel(t *testing.T) { StartedAt: &startedAt, State: &stateInt, } - execution, err := FromExecutionModel(executionModel) + execution, err := FromExecutionModel(executionModel, DefaultExecutionTransformerOptions) assert.Nil(t, err) assert.True(t, proto.Equal(&admin.Execution{ Id: &core.WorkflowExecutionIdentifier{ @@ -554,7 +556,7 @@ func TestFromExecutionModel_Aborted(t *testing.T) { AbortCause: abortCause, Closure: executionClosureBytes, } - execution, err := FromExecutionModel(executionModel) + execution, err := FromExecutionModel(executionModel, DefaultExecutionTransformerOptions) assert.Nil(t, err) assert.Equal(t, core.WorkflowExecution_ABORTED, execution.Closure.Phase) assert.True(t, proto.Equal(&admin.AbortMetadata{ @@ -562,11 +564,41 @@ func TestFromExecutionModel_Aborted(t *testing.T) { }, execution.Closure.GetAbortMetadata())) executionModel.Phase = core.WorkflowExecution_RUNNING.String() - execution, err = FromExecutionModel(executionModel) + execution, err = FromExecutionModel(executionModel, DefaultExecutionTransformerOptions) assert.Nil(t, err) assert.Empty(t, execution.Closure.GetAbortCause()) } +func TestFromExecutionModel_Error(t *testing.T) { + extraLongErrMsg := string(make([]byte, 2*trimmedErrMessageLen)) + execErr := &core.ExecutionError{ + Code: "CODE", + Message: extraLongErrMsg, + Kind: core.ExecutionError_USER, + } + executionClosureBytes, _ := proto.Marshal(&admin.ExecutionClosure{ + Phase: core.WorkflowExecution_FAILED, + OutputResult: &admin.ExecutionClosure_Error{Error: execErr}, + }) + executionModel := models.Execution{ + ExecutionKey: models.ExecutionKey{ + Project: "project", + Domain: "domain", + Name: "name", + }, + Phase: core.WorkflowExecution_FAILED.String(), + Closure: executionClosureBytes, + } + execution, err := FromExecutionModel(executionModel, &ExecutionTransformerOptions{ + TrimErrorMessage: true, + }) + expectedExecErr := execErr + expectedExecErr.Message = string(make([]byte, trimmedErrMessageLen)) + assert.Nil(t, err) + assert.Equal(t, core.WorkflowExecution_FAILED, 
execution.Closure.Phase) + assert.True(t, proto.Equal(expectedExecErr, execution.Closure.GetError())) +} + func TestFromExecutionModels(t *testing.T) { spec := testutils.GetExecutionRequest().Spec specBytes, _ := proto.Marshal(spec) @@ -609,7 +641,7 @@ func TestFromExecutionModels(t *testing.T) { State: &stateInt, }, } - executions, err := FromExecutionModels(executionModels) + executions, err := FromExecutionModels(executionModels, DefaultExecutionTransformerOptions) assert.Nil(t, err) assert.Len(t, executions, 1) assert.True(t, proto.Equal(&admin.Execution{ diff --git a/pkg/repositories/transformers/init_test.go b/pkg/repositories/transformers/init_test.go new file mode 100644 index 000000000..9fac6e6f5 --- /dev/null +++ b/pkg/repositories/transformers/init_test.go @@ -0,0 +1,10 @@ +package transformers + +import ( + "github.com/flyteorg/flytestdlib/contextutils" + "github.com/flyteorg/flytestdlib/promutils/labeled" +) + +func init() { + labeled.SetMetricKeys(contextutils.ProjectKey, contextutils.DomainKey, contextutils.WorkflowIDKey, contextutils.TaskIDKey) +} diff --git a/pkg/repositories/transformers/node_execution.go b/pkg/repositories/transformers/node_execution.go index 0fe9685c3..f1d90361f 100644 --- a/pkg/repositories/transformers/node_execution.go +++ b/pkg/repositories/transformers/node_execution.go @@ -3,6 +3,8 @@ package transformers import ( "context" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/event" + "github.com/flyteorg/flyteadmin/pkg/runtime/interfaces" "github.com/flyteorg/flytestdlib/storage" @@ -110,14 +112,18 @@ func CreateNodeExecutionModel(ctx context.Context, input ToNodeExecutionModelInp Name: input.Request.Event.Id.ExecutionId.Name, }, }, - Phase: input.Request.Event.Phase.String(), - InputURI: input.Request.Event.InputUri, + Phase: input.Request.Event.Phase.String(), + } + + reportedAt := input.Request.Event.ReportedAt + if reportedAt == nil || (reportedAt.Seconds == 0 && reportedAt.Nanos == 0) { + reportedAt = input.Request.Event.OccurredAt } closure := admin.NodeExecutionClosure{ Phase: input.Request.Event.Phase, CreatedAt: input.Request.Event.OccurredAt, - UpdatedAt: input.Request.Event.OccurredAt, + UpdatedAt: reportedAt, } nodeExecutionMetadata := admin.NodeExecutionMetaData{ @@ -126,6 +132,10 @@ func CreateNodeExecutionModel(ctx context.Context, input ToNodeExecutionModelInp IsParentNode: input.Request.Event.IsParent, IsDynamic: input.Request.Event.IsDynamic, } + err := handleNodeExecutionInputs(ctx, nodeExecution, input.Request, input.StorageClient) + if err != nil { + return nil, err + } if input.Request.Event.Phase == core.NodeExecution_RUNNING { err := addNodeRunningState(input.Request, nodeExecution, &closure) @@ -174,7 +184,11 @@ func CreateNodeExecutionModel(ctx context.Context, input ToNodeExecutionModelInp return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to read event timestamp") } nodeExecution.NodeExecutionCreatedAt = &nodeExecutionCreatedAt - nodeExecution.NodeExecutionUpdatedAt = &nodeExecutionCreatedAt + nodeExecutionUpdatedAt, err := ptypes.Timestamp(reportedAt) + if err != nil { + return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to read event reported_at timestamp") + } + nodeExecution.NodeExecutionUpdatedAt = &nodeExecutionUpdatedAt if input.Request.Event.ParentTaskMetadata != nil { nodeExecution.ParentTaskExecutionID = input.ParentTaskExecutionID } @@ -196,15 +210,23 @@ func UpdateNodeExecutionModel( ctx context.Context, request *admin.NodeExecutionEventRequest, nodeExecutionModel 
*models.NodeExecution, targetExecution *core.WorkflowExecutionIdentifier, dynamicWorkflowRemoteClosure string, inlineEventDataPolicy interfaces.InlineEventDataPolicy, storageClient *storage.DataStore) error { + err := handleNodeExecutionInputs(ctx, nodeExecutionModel, request, storageClient) + if err != nil { + return err + } var nodeExecutionClosure admin.NodeExecutionClosure - err := proto.Unmarshal(nodeExecutionModel.Closure, &nodeExecutionClosure) + err = proto.Unmarshal(nodeExecutionModel.Closure, &nodeExecutionClosure) if err != nil { return errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal node execution closure with error: %+v", err) } nodeExecutionModel.Phase = request.Event.Phase.String() nodeExecutionClosure.Phase = request.Event.Phase - nodeExecutionClosure.UpdatedAt = request.Event.OccurredAt + reportedAt := request.Event.ReportedAt + if reportedAt == nil || (reportedAt.Seconds == 0 && reportedAt.Nanos == 0) { + reportedAt = request.Event.OccurredAt + } + nodeExecutionClosure.UpdatedAt = reportedAt if request.Event.Phase == core.NodeExecution_RUNNING { err := addNodeRunningState(request, nodeExecutionModel, &nodeExecutionClosure) @@ -242,6 +264,12 @@ func UpdateNodeExecutionModel( nodeExecutionModel.CacheStatus = &st } nodeExecutionClosure.TargetMetadata = targetMetadata + + // if this is a dynamic task then maintain the DynamicJobSpecUri + dynamicWorkflowMetadata := request.Event.GetTaskNodeMetadata().DynamicWorkflow + if dynamicWorkflowMetadata != nil && len(dynamicWorkflowMetadata.DynamicJobSpecUri) > 0 { + nodeExecutionClosure.DynamicJobSpecUri = dynamicWorkflowMetadata.DynamicJobSpecUri + } } marshaledClosure, err := proto.Marshal(&nodeExecutionClosure) @@ -251,7 +279,7 @@ func UpdateNodeExecutionModel( } nodeExecutionModel.Closure = marshaledClosure - updatedAt, err := ptypes.Timestamp(request.Event.OccurredAt) + updatedAt, err := ptypes.Timestamp(reportedAt) if err != nil { return errors.NewFlyteAdminErrorf(codes.Internal, "failed to parse updated at timestamp") } @@ -287,12 +315,21 @@ func UpdateNodeExecutionModel( return nil } -func FromNodeExecutionModel(nodeExecutionModel models.NodeExecution) (*admin.NodeExecution, error) { +func FromNodeExecutionModel(nodeExecutionModel models.NodeExecution, opts *ExecutionTransformerOptions) (*admin.NodeExecution, error) { var closure admin.NodeExecutionClosure err := proto.Unmarshal(nodeExecutionModel.Closure, &closure) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal closure") } + if closure.GetError() != nil && opts != nil && opts.TrimErrorMessage && len(closure.GetError().Message) > 0 { + trimmedErrOutputResult := closure.GetError() + if len(trimmedErrOutputResult.Message) > trimmedErrMessageLen { + trimmedErrOutputResult.Message = trimmedErrOutputResult.Message[0:trimmedErrMessageLen] + } + closure.OutputResult = &admin.NodeExecutionClosure_Error{ + Error: trimmedErrOutputResult, + } + } var nodeExecutionMetadata admin.NodeExecutionMetaData err = proto.Unmarshal(nodeExecutionModel.NodeExecutionMetadata, &nodeExecutionMetadata) @@ -335,3 +372,31 @@ func GetNodeExecutionInternalData(internalData []byte) (*genModel.NodeExecutionI } return &nodeExecutionInternalData, nil } + +func handleNodeExecutionInputs(ctx context.Context, + nodeExecutionModel *models.NodeExecution, + request *admin.NodeExecutionEventRequest, + storageClient *storage.DataStore) error { + if len(nodeExecutionModel.InputURI) > 0 { + // Inputs are static over the duration of the node execution, no need to 
update them when they're already set + return nil + } + switch request.Event.GetInputValue().(type) { + case *event.NodeExecutionEvent_InputUri: + logger.Debugf(ctx, "saving node execution input URI [%s]", request.Event.GetInputUri()) + nodeExecutionModel.InputURI = request.Event.GetInputUri() + case *event.NodeExecutionEvent_InputData: + uri, err := common.OffloadLiteralMap(ctx, storageClient, request.Event.GetInputData(), + request.Event.Id.ExecutionId.Project, request.Event.Id.ExecutionId.Domain, request.Event.Id.ExecutionId.Name, + request.Event.Id.NodeId, InputsObjectSuffix) + if err != nil { + return err + } + logger.Debugf(ctx, "offloaded node execution inputs to [%s]", uri) + nodeExecutionModel.InputURI = uri.String() + default: + logger.Debugf(ctx, "request contained no input data") + + } + return nil +} diff --git a/pkg/repositories/transformers/node_execution_test.go b/pkg/repositories/transformers/node_execution_test.go index e07bff85b..88ef8fc26 100644 --- a/pkg/repositories/transformers/node_execution_test.go +++ b/pkg/repositories/transformers/node_execution_test.go @@ -5,6 +5,9 @@ import ( "testing" "time" + "github.com/flyteorg/flyteidl/clients/go/coreutils" + "github.com/flyteorg/flytestdlib/promutils" + flyteAdminErrors "github.com/flyteorg/flyteadmin/pkg/errors" "google.golang.org/grpc/codes" @@ -51,6 +54,14 @@ var childExecutionID = &core.WorkflowExecutionIdentifier{ const dynamicWorkflowClosureRef = "s3://bucket/admin/metadata/workflow" +const testInputURI = "fake://bucket/inputs.pb" + +var testInputs = &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "foo": coreutils.MustMakeLiteral("bar"), + }, +} + func TestAddRunningState(t *testing.T) { var startedAt = time.Now().UTC() var startedAtProto, _ = ptypes.TimestampProto(startedAt) @@ -198,8 +209,10 @@ func TestCreateNodeExecutionModel(t *testing.T) { Name: "name", }, }, - Phase: core.NodeExecution_RUNNING, - InputUri: "input uri", + Phase: core.NodeExecution_RUNNING, + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: testInputURI, + }, OutputResult: &event.NodeExecutionEvent_OutputUri{ OutputUri: "output uri", }, @@ -269,7 +282,7 @@ func TestCreateNodeExecutionModel(t *testing.T) { }, Phase: "RUNNING", Closure: closureBytes, - InputURI: "input uri", + InputURI: testInputURI, StartedAt: &occurredAt, NodeExecutionCreatedAt: &occurredAt, NodeExecutionUpdatedAt: &occurredAt, @@ -291,19 +304,24 @@ func TestUpdateNodeExecutionModel(t *testing.T) { ExecutionId: childExecutionID, }, }, + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: testInputURI, + }, }, } nodeExecutionModel := models.NodeExecution{ Phase: core.NodeExecution_UNDEFINED.String(), } + mockStore := commonMocks.GetMockStorageClient() err := UpdateNodeExecutionModel(context.TODO(), &request, &nodeExecutionModel, childExecutionID, dynamicWorkflowClosureRef, - interfaces.InlineEventDataPolicyStoreInline, commonMocks.GetMockStorageClient()) + interfaces.InlineEventDataPolicyStoreInline, mockStore) assert.Nil(t, err) assert.Equal(t, core.NodeExecution_RUNNING.String(), nodeExecutionModel.Phase) assert.Equal(t, occurredAt, *nodeExecutionModel.StartedAt) assert.EqualValues(t, occurredAt, *nodeExecutionModel.NodeExecutionUpdatedAt) assert.Nil(t, nodeExecutionModel.CacheStatus) assert.Equal(t, nodeExecutionModel.DynamicWorkflowRemoteClosureReference, dynamicWorkflowClosureRef) + assert.Equal(t, nodeExecutionModel.InputURI, testInputURI) var closure = &admin.NodeExecutionClosure{ Phase: core.NodeExecution_RUNNING, @@ -352,6 +370,7 @@ func 
TestUpdateNodeExecutionModel(t *testing.T) { }, }, }, + DynamicJobSpecUri: "/foo/bar", }, CheckpointUri: "last checkpoint uri", }, @@ -382,6 +401,7 @@ func TestUpdateNodeExecutionModel(t *testing.T) { CheckpointUri: request.Event.GetTaskNodeMetadata().CheckpointUri, }, }, + DynamicJobSpecUri: request.Event.GetTaskNodeMetadata().DynamicWorkflow.DynamicJobSpecUri, } var closureBytes, _ = proto.Marshal(closure) assert.Equal(t, nodeExecutionModel.Closure, closureBytes) @@ -422,6 +442,56 @@ func TestUpdateNodeExecutionModel(t *testing.T) { nodeExecMetadataExpected, _ := proto.Marshal(&nodeExecMetadata) assert.Equal(t, nodeExecutionModel.NodeExecutionMetadata, nodeExecMetadataExpected) }) + t.Run("inline input data", func(t *testing.T) { + request := admin.NodeExecutionEventRequest{ + Event: &event.NodeExecutionEvent{ + Id: sampleNodeExecID, + Phase: core.NodeExecution_RUNNING, + OccurredAt: occurredAtProto, + InputValue: &event.NodeExecutionEvent_InputData{ + InputData: testInputs, + }, + }, + } + nodeExecMetadata := admin.NodeExecutionMetaData{ + SpecNodeId: "foo", + } + nodeExecMetadataSerialized, _ := proto.Marshal(&nodeExecMetadata) + nodeExecutionModel := models.NodeExecution{ + Phase: core.NodeExecution_UNDEFINED.String(), + NodeExecutionMetadata: nodeExecMetadataSerialized, + } + ds, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) + assert.NoError(t, err) + err = UpdateNodeExecutionModel(context.TODO(), &request, &nodeExecutionModel, childExecutionID, dynamicWorkflowClosureRef, + interfaces.InlineEventDataPolicyStoreInline, ds) + assert.Nil(t, err) + assert.Equal(t, nodeExecutionModel.InputURI, "/metadata/project/domain/name/node-id/offloaded_inputs") + }) + t.Run("input data URI", func(t *testing.T) { + request := admin.NodeExecutionEventRequest{ + Event: &event.NodeExecutionEvent{ + Id: sampleNodeExecID, + Phase: core.NodeExecution_RUNNING, + OccurredAt: occurredAtProto, + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: testInputURI, + }, + }, + } + nodeExecMetadata := admin.NodeExecutionMetaData{ + SpecNodeId: "foo", + } + nodeExecMetadataSerialized, _ := proto.Marshal(&nodeExecMetadata) + nodeExecutionModel := models.NodeExecution{ + Phase: core.NodeExecution_UNDEFINED.String(), + NodeExecutionMetadata: nodeExecMetadataSerialized, + } + err := UpdateNodeExecutionModel(context.TODO(), &request, &nodeExecutionModel, childExecutionID, dynamicWorkflowClosureRef, + interfaces.InlineEventDataPolicyStoreInline, commonMocks.GetMockStorageClient()) + assert.Nil(t, err) + assert.Equal(t, nodeExecutionModel.InputURI, testInputURI) + }) } func TestFromNodeExecutionModel(t *testing.T) { @@ -447,7 +517,7 @@ func TestFromNodeExecutionModel(t *testing.T) { NodeExecutionMetadata: nodeExecutionMetadataBytes, InputURI: "input uri", Duration: duration, - }) + }, DefaultExecutionTransformerOptions) assert.Nil(t, err) assert.True(t, proto.Equal(&admin.NodeExecution{ Id: &nodeExecutionIdentifier, @@ -457,6 +527,39 @@ func TestFromNodeExecutionModel(t *testing.T) { }, nodeExecution)) } +func TestFromNodeExecutionModel_Error(t *testing.T) { + extraLongErrMsg := string(make([]byte, 2*trimmedErrMessageLen)) + execErr := &core.ExecutionError{ + Code: "CODE", + Message: extraLongErrMsg, + Kind: core.ExecutionError_USER, + } + executionClosureBytes, _ := proto.Marshal(&admin.ExecutionClosure{ + Phase: core.WorkflowExecution_FAILED, + OutputResult: &admin.ExecutionClosure_Error{Error: execErr}, + }) + nodeExecution, err := 
FromNodeExecutionModel(models.NodeExecution{ + NodeExecutionKey: models.NodeExecutionKey{ + NodeID: "nodey", + ExecutionKey: models.ExecutionKey{ + Project: "project", + Domain: "domain", + Name: "name", + }, + }, + Closure: executionClosureBytes, + NodeExecutionMetadata: nodeExecutionMetadataBytes, + InputURI: "input uri", + Duration: duration, + }, &ExecutionTransformerOptions{TrimErrorMessage: true}) + assert.Nil(t, err) + + expectedExecErr := execErr + expectedExecErr.Message = string(make([]byte, trimmedErrMessageLen)) + assert.Nil(t, err) + assert.True(t, proto.Equal(expectedExecErr, nodeExecution.Closure.GetError())) +} + func TestFromNodeExecutionModelWithChildren(t *testing.T) { nodeExecutionIdentifier := core.NodeExecutionIdentifier{ NodeId: "nodey", @@ -493,7 +596,7 @@ func TestFromNodeExecutionModelWithChildren(t *testing.T) { } t.Run("dynamic workflow", func(t *testing.T) { nodeExecModel.DynamicWorkflowRemoteClosureReference = "dummy_dynamic_worklfow_ref" - nodeExecution, err := FromNodeExecutionModel(nodeExecModel) + nodeExecution, err := FromNodeExecutionModel(nodeExecModel, DefaultExecutionTransformerOptions) assert.Nil(t, err) assert.True(t, proto.Equal(&admin.NodeExecution{ Id: &nodeExecutionIdentifier, @@ -509,7 +612,7 @@ func TestFromNodeExecutionModelWithChildren(t *testing.T) { }) t.Run("non dynamic workflow", func(t *testing.T) { nodeExecModel.DynamicWorkflowRemoteClosureReference = "" - nodeExecution, err := FromNodeExecutionModel(nodeExecModel) + nodeExecution, err := FromNodeExecutionModel(nodeExecModel, DefaultExecutionTransformerOptions) assert.Nil(t, err) assert.True(t, proto.Equal(&admin.NodeExecution{ Id: &nodeExecutionIdentifier, @@ -545,3 +648,60 @@ func TestGetNodeExecutionInternalData(t *testing.T) { assert.Equal(t, err.(flyteAdminErrors.FlyteAdminError).Code(), codes.Internal) }) } + +func TestHandleNodeExecutionInputs(t *testing.T) { + ctx := context.TODO() + t.Run("no need to update", func(t *testing.T) { + nodeExecutionModel := models.NodeExecution{ + InputURI: testInputURI, + } + err := handleNodeExecutionInputs(ctx, &nodeExecutionModel, nil, nil) + assert.NoError(t, err) + assert.Equal(t, nodeExecutionModel.InputURI, testInputURI) + }) + t.Run("read event input data", func(t *testing.T) { + nodeExecutionModel := models.NodeExecution{} + ds, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) + assert.NoError(t, err) + err = handleNodeExecutionInputs(ctx, &nodeExecutionModel, &admin.NodeExecutionEventRequest{ + Event: &event.NodeExecutionEvent{ + Id: sampleNodeExecID, + InputValue: &event.NodeExecutionEvent_InputData{ + InputData: testInputs, + }, + }, + }, ds) + assert.NoError(t, err) + expectedOffloadedInputsLocation := "/metadata/project/domain/name/node-id/offloaded_inputs" + assert.Equal(t, nodeExecutionModel.InputURI, expectedOffloadedInputsLocation) + actualInputs := &core.LiteralMap{} + err = ds.ReadProtobuf(ctx, storage.DataReference(expectedOffloadedInputsLocation), actualInputs) + assert.NoError(t, err) + assert.True(t, proto.Equal(actualInputs, testInputs)) + }) + t.Run("read event input uri", func(t *testing.T) { + nodeExecutionModel := models.NodeExecution{} + err := handleNodeExecutionInputs(ctx, &nodeExecutionModel, &admin.NodeExecutionEventRequest{ + Event: &event.NodeExecutionEvent{ + Id: sampleNodeExecID, + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: testInputURI, + }, + }, + }, nil) + assert.NoError(t, err) + assert.Equal(t, nodeExecutionModel.InputURI, testInputURI) + 
}) + t.Run("request contained no input data", func(t *testing.T) { + nodeExecutionModel := models.NodeExecution{ + InputURI: testInputURI, + } + err := handleNodeExecutionInputs(ctx, &nodeExecutionModel, &admin.NodeExecutionEventRequest{ + Event: &event.NodeExecutionEvent{ + Id: sampleNodeExecID, + }, + }, nil) + assert.NoError(t, err) + assert.Equal(t, nodeExecutionModel.InputURI, testInputURI) + }) +} diff --git a/pkg/repositories/transformers/signal.go b/pkg/repositories/transformers/signal.go new file mode 100644 index 000000000..69bb7af01 --- /dev/null +++ b/pkg/repositories/transformers/signal.go @@ -0,0 +1,134 @@ +package transformers + +import ( + "github.com/flyteorg/flyteadmin/pkg/errors" + "github.com/flyteorg/flyteadmin/pkg/repositories/models" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/golang/protobuf/proto" + + "google.golang.org/grpc/codes" +) + +func CreateSignalModel(signalID *core.SignalIdentifier, signalType *core.LiteralType, signalValue *core.Literal) (models.Signal, error) { + signalModel := models.Signal{} + if signalID != nil { + signalKey := &signalModel.SignalKey + if signalID.ExecutionId != nil { + executionKey := &signalKey.ExecutionKey + if len(signalID.ExecutionId.Project) > 0 { + executionKey.Project = signalID.ExecutionId.Project + } + if len(signalID.ExecutionId.Domain) > 0 { + executionKey.Domain = signalID.ExecutionId.Domain + } + if len(signalID.ExecutionId.Name) > 0 { + executionKey.Name = signalID.ExecutionId.Name + } + } + + if len(signalID.SignalId) > 0 { + signalKey.SignalID = signalID.SignalId + } + } + + if signalType != nil { + typeBytes, err := proto.Marshal(signalType) + if err != nil { + return models.Signal{}, errors.NewFlyteAdminError(codes.Internal, "Failed to serialize signal type") + } + + signalModel.Type = typeBytes + } + + if signalValue != nil { + valueBytes, err := proto.Marshal(signalValue) + if err != nil { + return models.Signal{}, errors.NewFlyteAdminError(codes.Internal, "Failed to serialize signal value") + } + + signalModel.Value = valueBytes + } + + return signalModel, nil +} + +func initSignalIdentifier(id *core.SignalIdentifier) *core.SignalIdentifier { + if id == nil { + id = &core.SignalIdentifier{} + } + return id +} + +func initWorkflowExecutionIdentifier(id *core.WorkflowExecutionIdentifier) *core.WorkflowExecutionIdentifier { + if id == nil { + return &core.WorkflowExecutionIdentifier{} + } + return id +} + +func FromSignalModel(signalModel models.Signal) (admin.Signal, error) { + signal := admin.Signal{} + + var executionID *core.WorkflowExecutionIdentifier + if len(signalModel.SignalKey.ExecutionKey.Project) > 0 { + executionID = initWorkflowExecutionIdentifier(executionID) + executionID.Project = signalModel.SignalKey.ExecutionKey.Project + } + if len(signalModel.SignalKey.ExecutionKey.Domain) > 0 { + executionID = initWorkflowExecutionIdentifier(executionID) + executionID.Domain = signalModel.SignalKey.ExecutionKey.Domain + } + if len(signalModel.SignalKey.ExecutionKey.Name) > 0 { + executionID = initWorkflowExecutionIdentifier(executionID) + executionID.Name = signalModel.SignalKey.ExecutionKey.Name + } + + var signalID *core.SignalIdentifier + if executionID != nil { + signalID = initSignalIdentifier(signalID) + signalID.ExecutionId = executionID + } + if len(signalModel.SignalKey.SignalID) > 0 { + signalID = initSignalIdentifier(signalID) + signalID.SignalId = signalModel.SignalKey.SignalID + } + + if signalID != nil { + 
signal.Id = signalID + } + + if len(signalModel.Type) > 0 { + var typeDeserialized core.LiteralType + err := proto.Unmarshal(signalModel.Type, &typeDeserialized) + if err != nil { + return admin.Signal{}, errors.NewFlyteAdminError(codes.Internal, "failed to unmarshal signal type") + } + signal.Type = &typeDeserialized + } + + if len(signalModel.Value) > 0 { + var valueDeserialized core.Literal + err := proto.Unmarshal(signalModel.Value, &valueDeserialized) + if err != nil { + return admin.Signal{}, errors.NewFlyteAdminError(codes.Internal, "failed to unmarshal signal value") + } + signal.Value = &valueDeserialized + } + + return signal, nil +} + +func FromSignalModels(signalModels []models.Signal) ([]*admin.Signal, error) { + signals := make([]*admin.Signal, len(signalModels)) + for idx, signalModel := range signalModels { + signal, err := FromSignalModel(signalModel) + if err != nil { + return nil, err + } + signals[idx] = &signal + } + return signals, nil +} diff --git a/pkg/repositories/transformers/signal_test.go b/pkg/repositories/transformers/signal_test.go new file mode 100644 index 000000000..c43ed0bb6 --- /dev/null +++ b/pkg/repositories/transformers/signal_test.go @@ -0,0 +1,163 @@ +package transformers + +import ( + "testing" + + "github.com/flyteorg/flyteadmin/pkg/repositories/models" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/stretchr/testify/assert" + + "github.com/golang/protobuf/proto" +) + +var ( + booleanType = core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_BOOLEAN, + }, + } + + booleanValue = core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Boolean{ + Boolean: false, + }, + }, + }, + }, + }, + } + + signalKey = models.SignalKey{ + ExecutionKey: models.ExecutionKey{ + Project: "project", + Domain: "domain", + Name: "name", + }, + SignalID: "signal", + } + + signalID = core.SignalIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + SignalId: "signal", + } +) + +func TestCreateSignalModel(t *testing.T) { + booleanTypeBytes, _ := proto.Marshal(&booleanType) + booleanValueBytes, _ := proto.Marshal(&booleanValue) + + tests := []struct { + name string + model models.Signal + proto admin.Signal + }{ + { + name: "Empty", + model: models.Signal{}, + proto: admin.Signal{}, + }, + { + name: "Full", + model: models.Signal{ + SignalKey: signalKey, + Type: booleanTypeBytes, + Value: booleanValueBytes, + }, + proto: admin.Signal{ + Id: &signalID, + Type: &booleanType, + Value: &booleanValue, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + signalModel, err := CreateSignalModel(test.proto.Id, test.proto.Type, test.proto.Value) + assert.NoError(t, err) + + assert.Equal(t, test.model, signalModel) + }) + } +} + +func TestFromSignalModel(t *testing.T) { + booleanTypeBytes, _ := proto.Marshal(&booleanType) + booleanValueBytes, _ := proto.Marshal(&booleanValue) + + tests := []struct { + name string + model models.Signal + proto admin.Signal + }{ + { + name: "Empty", + model: models.Signal{}, + proto: admin.Signal{}, + }, + { + name: "Full", + model: models.Signal{ + SignalKey: signalKey, + Type: booleanTypeBytes, + Value: booleanValueBytes, + }, + proto: admin.Signal{ + Id: &signalID, + Type: &booleanType, + Value: &booleanValue, + }, + }, + } + + for _, 
test := range tests { + t.Run(test.name, func(t *testing.T) { + signal, err := FromSignalModel(test.model) + assert.NoError(t, err) + + assert.True(t, proto.Equal(&test.proto, &signal)) + }) + } +} + +func TestFromSignalModels(t *testing.T) { + booleanTypeBytes, _ := proto.Marshal(&booleanType) + booleanValueBytes, _ := proto.Marshal(&booleanValue) + + signalModels := []models.Signal{ + models.Signal{}, + models.Signal{ + SignalKey: signalKey, + Type: booleanTypeBytes, + Value: booleanValueBytes, + }, + } + + signals := []*admin.Signal{ + &admin.Signal{}, + &admin.Signal{ + Id: &signalID, + Type: &booleanType, + Value: &booleanValue, + }, + } + + s, err := FromSignalModels(signalModels) + assert.NoError(t, err) + + assert.Len(t, s, len(signals)) + for idx, signal := range signals { + assert.True(t, proto.Equal(signal, s[idx])) + } +} diff --git a/pkg/repositories/transformers/task.go b/pkg/repositories/transformers/task.go index ebf9b88a3..3bb7dbaae 100644 --- a/pkg/repositories/transformers/task.go +++ b/pkg/repositories/transformers/task.go @@ -41,8 +41,9 @@ func FromTaskModel(taskModel models.Task) (admin.Task, error) { taskClosure := &admin.TaskClosure{} err := proto.Unmarshal(taskModel.Closure, taskClosure) if err != nil { - return admin.Task{}, errors.NewFlyteAdminError(codes.Internal, "failed to unmarshal clsoure") + return admin.Task{}, errors.NewFlyteAdminError(codes.Internal, "failed to unmarshal closure") } + createdAt, err := ptypes.TimestampProto(taskModel.CreatedAt) if err != nil { return admin.Task{}, errors.NewFlyteAdminErrorf(codes.Internal, "failed to serialize created at") @@ -56,8 +57,9 @@ func FromTaskModel(taskModel models.Task) (admin.Task, error) { Version: taskModel.Version, } return admin.Task{ - Id: &id, - Closure: taskClosure, + Id: &id, + Closure: taskClosure, + ShortDescription: taskModel.ShortDescription, }, nil } diff --git a/pkg/repositories/transformers/task_execution.go b/pkg/repositories/transformers/task_execution.go index f34c6ddc7..e3eca3884 100644 --- a/pkg/repositories/transformers/task_execution.go +++ b/pkg/repositories/transformers/task_execution.go @@ -121,7 +121,10 @@ func CreateTaskExecutionModel(ctx context.Context, input CreateTaskExecutionMode Phase: input.Request.Event.Phase.String(), PhaseVersion: input.Request.Event.PhaseVersion, - InputURI: input.Request.Event.InputUri, + } + err := handleTaskExecutionInputs(ctx, taskExecution, input.Request, input.StorageClient) + if err != nil { + return nil, err } metadata := input.Request.Event.Metadata @@ -136,9 +139,14 @@ func CreateTaskExecutionModel(ctx context.Context, input CreateTaskExecutionMode }) } + reportedAt := input.Request.Event.ReportedAt + if reportedAt == nil || (reportedAt.Seconds == 0 && reportedAt.Nanos == 0) { + reportedAt = input.Request.Event.OccurredAt + } + closure := &admin.TaskExecutionClosure{ Phase: input.Request.Event.Phase, - UpdatedAt: input.Request.Event.OccurredAt, + UpdatedAt: reportedAt, CreatedAt: input.Request.Event.OccurredAt, Logs: input.Request.Event.Logs, CustomInfo: input.Request.Event.CustomInfo, @@ -148,6 +156,15 @@ func CreateTaskExecutionModel(ctx context.Context, input CreateTaskExecutionMode EventVersion: input.Request.Event.EventVersion, } + if len(input.Request.Event.Reason) > 0 { + closure.Reasons = []*admin.Reason{ + &admin.Reason{ + OccurredAt: input.Request.Event.OccurredAt, + Message: input.Request.Event.Reason, + }, + } + } + eventPhase := input.Request.Event.Phase // Different tasks may report different phases as their first event. 
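For illustration: several hunks in this change, in both the node and task execution transformers, apply the same rule when setting the closure UpdatedAt and the *UpdatedAt model fields: prefer the event's reported_at timestamp, and fall back to occurred_at when reported_at is nil or zero. The following is a minimal runnable sketch of that rule, not code from this diff; it assumes the golang/protobuf timestamp type that backs these event fields, and the eventTimestamp helper name is hypothetical.

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes/timestamp"
)

// eventTimestamp mirrors the fallback used for closure.UpdatedAt above:
// prefer reported_at, but treat a nil or zero timestamp as unset and use occurred_at.
func eventTimestamp(reportedAt, occurredAt *timestamp.Timestamp) *timestamp.Timestamp {
	if reportedAt == nil || (reportedAt.Seconds == 0 && reportedAt.Nanos == 0) {
		return occurredAt
	}
	return reportedAt
}

func main() {
	occurred := &timestamp.Timestamp{Seconds: 100}
	fmt.Println(eventTimestamp(nil, occurred).Seconds)                                // 100: falls back to occurred_at
	fmt.Println(eventTimestamp(&timestamp.Timestamp{Seconds: 200}, occurred).Seconds) // 200: reported_at wins
}

Events that never set reported_at keep their previous behavior, since the fallback degrades to occurred_at, which is what UpdatedAt used before this change.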
@@ -178,7 +195,11 @@ func CreateTaskExecutionModel(ctx context.Context, input CreateTaskExecutionMode return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to read event timestamp") } taskExecution.TaskExecutionCreatedAt = &taskExecutionCreatedAt - taskExecution.TaskExecutionUpdatedAt = &taskExecutionCreatedAt + taskExecutionUpdatedAt, err := ptypes.Timestamp(reportedAt) + if err != nil { + return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to read event reported_at timestamp") + } + taskExecution.TaskExecutionUpdatedAt = &taskExecutionUpdatedAt return taskExecution, nil } @@ -342,8 +363,12 @@ func mergeMetadata(existing, latest *event.TaskExecutionMetadata) *event.TaskExe func UpdateTaskExecutionModel(ctx context.Context, request *admin.TaskExecutionEventRequest, taskExecutionModel *models.TaskExecution, inlineEventDataPolicy interfaces.InlineEventDataPolicy, storageClient *storage.DataStore) error { + err := handleTaskExecutionInputs(ctx, taskExecutionModel, request, storageClient) + if err != nil { + return err + } var taskExecutionClosure admin.TaskExecutionClosure - err := proto.Unmarshal(taskExecutionModel.Closure, &taskExecutionClosure) + err = proto.Unmarshal(taskExecutionModel.Closure, &taskExecutionClosure) if err != nil { return errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal task execution closure with error: %+v", err) @@ -352,9 +377,25 @@ func UpdateTaskExecutionModel(ctx context.Context, request *admin.TaskExecutionE taskExecutionModel.Phase = request.Event.Phase.String() taskExecutionModel.PhaseVersion = request.Event.PhaseVersion taskExecutionClosure.Phase = request.Event.Phase - taskExecutionClosure.UpdatedAt = request.Event.OccurredAt + reportedAt := request.Event.ReportedAt + if reportedAt == nil || (reportedAt.Seconds == 0 && reportedAt.Nanos == 0) { + reportedAt = request.Event.OccurredAt + } + taskExecutionClosure.UpdatedAt = reportedAt taskExecutionClosure.Logs = mergeLogs(taskExecutionClosure.Logs, request.Event.Logs) if len(request.Event.Reason) > 0 { + if taskExecutionClosure.Reason != request.Event.Reason { + // by tracking a time-series of reasons we increase the size of the TaskExecutionClosure in scenarios where + // a task reports a large number of unique reasons. if this size increase becomes problematic, this logic
+ taskExecutionClosure.Reasons = append( + taskExecutionClosure.Reasons, + &admin.Reason{ + OccurredAt: request.Event.OccurredAt, + Message: request.Event.Reason, + }) + } + taskExecutionClosure.Reason = request.Event.Reason } if existingTaskPhase != core.TaskExecution_RUNNING.String() && taskExecutionModel.Phase == core.TaskExecution_RUNNING.String() { @@ -384,7 +425,7 @@ func UpdateTaskExecutionModel(ctx context.Context, request *admin.TaskExecutionE codes.Internal, "failed to marshal task execution closure with error: %v", err) } taskExecutionModel.Closure = marshaledClosure - updatedAt, err := ptypes.Timestamp(request.Event.OccurredAt) + updatedAt, err := ptypes.Timestamp(reportedAt) if err != nil { return errors.NewFlyteAdminErrorf(codes.Internal, "failed to parse updated at timestamp") } @@ -392,12 +433,21 @@ func UpdateTaskExecutionModel(ctx context.Context, request *admin.TaskExecutionE return nil } -func FromTaskExecutionModel(taskExecutionModel models.TaskExecution) (*admin.TaskExecution, error) { +func FromTaskExecutionModel(taskExecutionModel models.TaskExecution, opts *ExecutionTransformerOptions) (*admin.TaskExecution, error) { var closure admin.TaskExecutionClosure err := proto.Unmarshal(taskExecutionModel.Closure, &closure) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal closure") } + if closure.GetError() != nil && opts != nil && opts.TrimErrorMessage && len(closure.GetError().Message) > 0 { + trimmedErrOutputResult := closure.GetError() + if len(trimmedErrOutputResult.Message) > trimmedErrMessageLen { + trimmedErrOutputResult.Message = trimmedErrOutputResult.Message[0:trimmedErrMessageLen] + } + closure.OutputResult = &admin.TaskExecutionClosure_Error{ + Error: trimmedErrOutputResult, + } + } taskExecution := &admin.TaskExecution{ Id: &core.TaskExecutionIdentifier{ @@ -428,10 +478,10 @@ func FromTaskExecutionModel(taskExecutionModel models.TaskExecution) (*admin.Tas return taskExecution, nil } -func FromTaskExecutionModels(taskExecutionModels []models.TaskExecution) ([]*admin.TaskExecution, error) { +func FromTaskExecutionModels(taskExecutionModels []models.TaskExecution, opts *ExecutionTransformerOptions) ([]*admin.TaskExecution, error) { taskExecutions := make([]*admin.TaskExecution, len(taskExecutionModels)) for idx, taskExecutionModel := range taskExecutionModels { - taskExecution, err := FromTaskExecutionModel(taskExecutionModel) + taskExecution, err := FromTaskExecutionModel(taskExecutionModel, opts) if err != nil { return nil, err } @@ -439,3 +489,30 @@ func FromTaskExecutionModels(taskExecutionModels []models.TaskExecution) ([]*adm } return taskExecutions, nil } + +func handleTaskExecutionInputs(ctx context.Context, taskExecutionModel *models.TaskExecution, + request *admin.TaskExecutionEventRequest, storageClient *storage.DataStore) error { + if len(taskExecutionModel.InputURI) > 0 { + // Inputs are static over the duration of the task execution, no need to update them when they're already set + return nil + } + switch request.Event.GetInputValue().(type) { + case *event.TaskExecutionEvent_InputUri: + taskExecutionModel.InputURI = request.GetEvent().GetInputUri() + case *event.TaskExecutionEvent_InputData: + uri, err := common.OffloadLiteralMap(ctx, storageClient, request.GetEvent().GetInputData(), + request.Event.ParentNodeExecutionId.ExecutionId.Project, request.Event.ParentNodeExecutionId.ExecutionId.Domain, + request.Event.ParentNodeExecutionId.ExecutionId.Name, request.Event.ParentNodeExecutionId.NodeId, + 
request.Event.TaskId.Project, request.Event.TaskId.Domain, request.Event.TaskId.Name, request.Event.TaskId.Version, + strconv.FormatUint(uint64(request.Event.RetryAttempt), 10), InputsObjectSuffix) + if err != nil { + return err + } + logger.Debugf(ctx, "offloaded task execution inputs to [%s]", uri) + taskExecutionModel.InputURI = uri.String() + default: + logger.Debugf(ctx, "request contained no input data") + + } + return nil +} diff --git a/pkg/repositories/transformers/task_execution_test.go b/pkg/repositories/transformers/task_execution_test.go index 03647ecec..da7af92a2 100644 --- a/pkg/repositories/transformers/task_execution_test.go +++ b/pkg/repositories/transformers/task_execution_test.go @@ -7,6 +7,8 @@ import ( "testing" "time" + "github.com/flyteorg/flytestdlib/promutils" + commonMocks "github.com/flyteorg/flyteadmin/pkg/common/mocks" "github.com/flyteorg/flyteadmin/pkg/runtime/interfaces" "github.com/flyteorg/flytestdlib/storage" @@ -226,6 +228,8 @@ func TestAddTaskTerminalState_OutputData(t *testing.T) { } func TestCreateTaskExecutionModelQueued(t *testing.T) { + ds, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) + assert.NoError(t, err) taskExecutionModel, err := CreateTaskExecutionModel(context.TODO(), CreateTaskExecutionModelInput{ Request: &admin.TaskExecutionEventRequest{ Event: &event.TaskExecutionEvent{ @@ -233,12 +237,15 @@ func TestCreateTaskExecutionModelQueued(t *testing.T) { ParentNodeExecutionId: sampleNodeExecID, Phase: core.TaskExecution_QUEUED, RetryAttempt: 1, - InputUri: "input uri", - OccurredAt: taskEventOccurredAtProto, - Reason: "Task was scheduled", - TaskType: "sidecar", + InputValue: &event.TaskExecutionEvent_InputData{ + InputData: testInputs, + }, + OccurredAt: taskEventOccurredAtProto, + Reason: "Task was scheduled", + TaskType: "sidecar", }, }, + StorageClient: ds, }) assert.Nil(t, err) @@ -248,7 +255,13 @@ func TestCreateTaskExecutionModelQueued(t *testing.T) { CreatedAt: taskEventOccurredAtProto, UpdatedAt: taskEventOccurredAtProto, Reason: "Task was scheduled", - TaskType: "sidecar", + Reasons: []*admin.Reason{ + &admin.Reason{ + OccurredAt: taskEventOccurredAtProto, + Message: "Task was scheduled", + }, + }, + TaskType: "sidecar", } expectedClosureBytes, err := proto.Marshal(expectedClosure) @@ -273,7 +286,7 @@ func TestCreateTaskExecutionModelQueued(t *testing.T) { RetryAttempt: &retryAttemptValue, }, Phase: "QUEUED", - InputURI: "input uri", + InputURI: "/metadata/project/domain/name/node-id/project/domain/task-id/task-v/1/offloaded_inputs", Closure: expectedClosureBytes, StartedAt: nil, TaskExecutionCreatedAt: &taskEventOccurredAt, @@ -290,7 +303,9 @@ func TestCreateTaskExecutionModelRunning(t *testing.T) { Phase: core.TaskExecution_RUNNING, PhaseVersion: uint32(2), RetryAttempt: 1, - InputUri: "input uri", + InputValue: &event.TaskExecutionEvent_InputUri{ + InputUri: testInputURI, + }, OutputResult: &event.TaskExecutionEvent_OutputUri{ OutputUri: "output uri", }, @@ -329,6 +344,8 @@ func TestCreateTaskExecutionModelRunning(t *testing.T) { CustomInfo: &customInfo, } + t.Logf("expected %+v %+v\n", expectedClosure.Reason, expectedClosure.Reasons) + expectedClosureBytes, err := proto.Marshal(expectedClosure) assert.Nil(t, err) @@ -352,7 +369,7 @@ func TestCreateTaskExecutionModelRunning(t *testing.T) { }, Phase: "RUNNING", PhaseVersion: uint32(2), - InputURI: "input uri", + InputURI: testInputURI, Closure: expectedClosureBytes, StartedAt: &taskEventOccurredAt, TaskExecutionCreatedAt: 
&taskEventOccurredAt, @@ -377,6 +394,13 @@ func TestUpdateTaskExecutionModelRunningToFailed(t *testing.T) { CustomInfo: transformMapToStructPB(t, map[string]string{ "key1": "value1", }), + Reason: "Task was scheduled", + Reasons: []*admin.Reason{ + &admin.Reason{ + OccurredAt: taskEventOccurredAtProto, + Message: "Task was scheduled", + }, + }, } closureBytes, err := proto.Marshal(existingClosure) @@ -422,7 +446,9 @@ func TestUpdateTaskExecutionModelRunningToFailed(t *testing.T) { ParentNodeExecutionId: sampleNodeExecID, Phase: core.TaskExecution_FAILED, RetryAttempt: 1, - InputUri: "input uri", + InputValue: &event.TaskExecutionEvent_InputUri{ + InputUri: testInputURI, + }, OutputResult: &event.TaskExecutionEvent_Error{ Error: outputError, }, @@ -470,6 +496,16 @@ func TestUpdateTaskExecutionModelRunningToFailed(t *testing.T) { "key1": "value1 updated", }), Reason: "task failed", + Reasons: []*admin.Reason{ + &admin.Reason{ + OccurredAt: taskEventOccurredAtProto, + Message: "Task was scheduled", + }, + &admin.Reason{ + OccurredAt: occuredAtProto, + Message: "task failed", + }, + }, } expectedClosureBytes, err := proto.Marshal(expectedClosure) @@ -537,7 +573,7 @@ func TestFromTaskExecutionModel(t *testing.T) { InputURI: "input uri", Duration: duration, Closure: closureBytes, - }) + }, DefaultExecutionTransformerOptions) assert.Nil(t, err) assert.True(t, proto.Equal(&admin.TaskExecution{ Id: &core.TaskExecutionIdentifier{ @@ -563,6 +599,68 @@ func TestFromTaskExecutionModel(t *testing.T) { }, taskExecution)) } +func TestFromTaskExecutionModel_Error(t *testing.T) { + extraLongErrMsg := string(make([]byte, 2*trimmedErrMessageLen)) + execErr := &core.ExecutionError{ + Code: "CODE", + Message: extraLongErrMsg, + Kind: core.ExecutionError_USER, + } + closureBytes, _ := proto.Marshal(&admin.ExecutionClosure{ + Phase: core.WorkflowExecution_FAILED, + OutputResult: &admin.ExecutionClosure_Error{Error: execErr}, + }) + taskExecutionModel := models.TaskExecution{ + TaskExecutionKey: models.TaskExecutionKey{ + TaskKey: models.TaskKey{ + Project: "project", + Domain: "domain", + Name: "name", + Version: "version", + }, + NodeExecutionKey: models.NodeExecutionKey{ + NodeID: "node id", + ExecutionKey: models.ExecutionKey{ + Project: "ex project", + Domain: "ex domain", + Name: "ex name", + }, + }, + RetryAttempt: &retryAttemptValue, + }, + InputURI: "input uri", + Duration: duration, + Closure: closureBytes, + } + taskExecution, err := FromTaskExecutionModel(taskExecutionModel, &ExecutionTransformerOptions{ + TrimErrorMessage: true, + }) + + expectedExecErr := execErr + expectedExecErr.Message = string(make([]byte, trimmedErrMessageLen)) + assert.Nil(t, err) + assert.True(t, proto.Equal(expectedExecErr, taskExecution.Closure.GetError())) + + extraShortErrMsg := string(make([]byte, 10)) + execErr = &core.ExecutionError{ + Code: "CODE", + Message: extraShortErrMsg, + Kind: core.ExecutionError_USER, + } + closureBytes, _ = proto.Marshal(&admin.ExecutionClosure{ + Phase: core.WorkflowExecution_FAILED, + OutputResult: &admin.ExecutionClosure_Error{Error: execErr}, + }) + taskExecutionModel.Closure = closureBytes + taskExecution, err = FromTaskExecutionModel(taskExecutionModel, &ExecutionTransformerOptions{ + TrimErrorMessage: true, + }) + expectedExecErr = execErr + expectedExecErr.Message = string(make([]byte, 10)) + assert.Nil(t, err) + assert.True(t, proto.Equal(expectedExecErr, taskExecution.Closure.GetError())) +} + func TestFromTaskExecutionModels(t *testing.T) { taskClosure := 
&admin.TaskExecutionClosure{ Phase: core.TaskExecution_RUNNING, @@ -598,7 +696,7 @@ func TestFromTaskExecutionModels(t *testing.T) { Duration: duration, Closure: closureBytes, }, - }) + }, DefaultExecutionTransformerOptions) assert.Nil(t, err) assert.Len(t, taskExecutions, 1) assert.True(t, proto.Equal(&admin.TaskExecution{ @@ -1182,3 +1280,66 @@ func TestMergeMetadata(t *testing.T) { }) } } + +func TestHandleTaskExecutionInputs(t *testing.T) { + ctx := context.TODO() + t.Run("no need to update", func(t *testing.T) { + taskExecutionModel := models.TaskExecution{ + InputURI: testInputURI, + } + err := handleTaskExecutionInputs(ctx, &taskExecutionModel, nil, nil) + assert.NoError(t, err) + assert.Equal(t, taskExecutionModel.InputURI, testInputURI) + }) + t.Run("read event input data", func(t *testing.T) { + taskExecutionModel := models.TaskExecution{} + ds, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) + assert.NoError(t, err) + err = handleTaskExecutionInputs(ctx, &taskExecutionModel, &admin.TaskExecutionEventRequest{ + Event: &event.TaskExecutionEvent{ + TaskId: sampleTaskID, + ParentNodeExecutionId: sampleNodeExecID, + RetryAttempt: retryAttemptValue, + InputValue: &event.TaskExecutionEvent_InputData{ + InputData: testInputs, + }, + }, + }, ds) + assert.NoError(t, err) + expectedOffloadedInputsLocation := "/metadata/project/domain/name/node-id/project/domain/task-id/task-v/1/offloaded_inputs" + assert.Equal(t, taskExecutionModel.InputURI, expectedOffloadedInputsLocation) + actualInputs := &core.LiteralMap{} + err = ds.ReadProtobuf(ctx, storage.DataReference(expectedOffloadedInputsLocation), actualInputs) + assert.NoError(t, err) + assert.True(t, proto.Equal(actualInputs, testInputs)) + }) + t.Run("read event input uri", func(t *testing.T) { + taskExecutionModel := models.TaskExecution{} + err := handleTaskExecutionInputs(ctx, &taskExecutionModel, &admin.TaskExecutionEventRequest{ + Event: &event.TaskExecutionEvent{ + TaskId: sampleTaskID, + ParentNodeExecutionId: sampleNodeExecID, + RetryAttempt: retryAttemptValue, + InputValue: &event.TaskExecutionEvent_InputUri{ + InputUri: testInputURI, + }, + }, + }, nil) + assert.NoError(t, err) + assert.Equal(t, taskExecutionModel.InputURI, testInputURI) + }) + t.Run("request contained no input data", func(t *testing.T) { + taskExecutionModel := models.TaskExecution{ + InputURI: testInputURI, + } + err := handleTaskExecutionInputs(ctx, &taskExecutionModel, &admin.TaskExecutionEventRequest{ + Event: &event.TaskExecutionEvent{ + TaskId: sampleTaskID, + ParentNodeExecutionId: sampleNodeExecID, + RetryAttempt: retryAttemptValue, + }, + }, nil) + assert.NoError(t, err) + assert.Equal(t, taskExecutionModel.InputURI, testInputURI) + }) +} diff --git a/pkg/repositories/transformers/workflow.go b/pkg/repositories/transformers/workflow.go index eeb1c5bb5..ce9b75e00 100644 --- a/pkg/repositories/transformers/workflow.go +++ b/pkg/repositories/transformers/workflow.go @@ -53,6 +53,7 @@ func FromWorkflowModel(workflowModel models.Workflow) (admin.Workflow, error) { Closure: &admin.WorkflowClosure{ CreatedAt: createdAt, }, + ShortDescription: workflowModel.ShortDescription, }, nil } diff --git a/pkg/rpc/adminservice/base.go b/pkg/rpc/adminservice/base.go index a4a7eb93a..77c78f480 100644 --- a/pkg/rpc/adminservice/base.go +++ b/pkg/rpc/adminservice/base.go @@ -34,17 +34,19 @@ import ( type AdminService struct { service.UnimplementedAdminServiceServer - TaskManager interfaces.TaskInterface - WorkflowManager 
interfaces.WorkflowInterface - LaunchPlanManager interfaces.LaunchPlanInterface - ExecutionManager interfaces.ExecutionInterface - NodeExecutionManager interfaces.NodeExecutionInterface - TaskExecutionManager interfaces.TaskExecutionInterface - ProjectManager interfaces.ProjectInterface - ResourceManager interfaces.ResourceInterface - NamedEntityManager interfaces.NamedEntityInterface - VersionManager interfaces.VersionInterface - Metrics AdminMetrics + TaskManager interfaces.TaskInterface + WorkflowManager interfaces.WorkflowInterface + LaunchPlanManager interfaces.LaunchPlanInterface + ExecutionManager interfaces.ExecutionInterface + NodeExecutionManager interfaces.NodeExecutionInterface + TaskExecutionManager interfaces.TaskExecutionInterface + ProjectManager interfaces.ProjectInterface + ResourceManager interfaces.ResourceInterface + NamedEntityManager interfaces.NamedEntityInterface + VersionManager interfaces.VersionInterface + DescriptionEntityManager interfaces.DescriptionEntityInterface + MetricsManager interfaces.MetricsInterface + Metrics AdminMetrics } // Intercepts all admin requests to handle panics during execution. @@ -132,6 +134,7 @@ func NewAdminServer(ctx context.Context, pluginRegistry *plugins.Registry, confi repo, configuration, workflowengineImpl.NewCompiler(), dataStorageClient, applicationConfiguration.GetMetadataStoragePrefix(), adminScope.NewSubScope("workflow_manager")) namedEntityManager := manager.NewNamedEntityManager(repo, configuration, adminScope.NewSubScope("named_entity_manager")) + descriptionEntityManager := manager.NewDescriptionEntityManager(repo, configuration, adminScope.NewSubScope("description_entity_manager")) executionEventWriter := eventWriter.NewWorkflowExecutionEventWriter(repo, applicationConfiguration.GetAsyncEventsBufferSize()) go func() { @@ -155,21 +158,27 @@ func NewAdminServer(ctx context.Context, pluginRegistry *plugins.Registry, confi nodeExecutionEventWriter.Run() }() + nodeExecutionManager := manager.NewNodeExecutionManager(repo, configuration, applicationConfiguration.GetMetadataStoragePrefix(), dataStorageClient, + adminScope.NewSubScope("node_execution_manager"), urlData, eventPublisher, cloudEventPublisher, nodeExecutionEventWriter) + taskExecutionManager := manager.NewTaskExecutionManager(repo, configuration, dataStorageClient, + adminScope.NewSubScope("task_execution_manager"), urlData, eventPublisher, cloudEventPublisher) + logger.Info(ctx, "Initializing a new AdminService") return &AdminService{ TaskManager: manager.NewTaskManager(repo, configuration, workflowengineImpl.NewCompiler(), adminScope.NewSubScope("task_manager")), - WorkflowManager: workflowManager, - LaunchPlanManager: launchPlanManager, - ExecutionManager: executionManager, - NamedEntityManager: namedEntityManager, - VersionManager: versionManager, - NodeExecutionManager: manager.NewNodeExecutionManager(repo, configuration, applicationConfiguration.GetMetadataStoragePrefix(), dataStorageClient, - adminScope.NewSubScope("node_execution_manager"), urlData, eventPublisher, cloudEventPublisher, nodeExecutionEventWriter), - TaskExecutionManager: manager.NewTaskExecutionManager(repo, configuration, dataStorageClient, - adminScope.NewSubScope("task_execution_manager"), urlData, eventPublisher, cloudEventPublisher), - ProjectManager: manager.NewProjectManager(repo, configuration), - ResourceManager: resources.NewResourceManager(repo, configuration.ApplicationConfiguration()), - Metrics: InitMetrics(adminScope), + WorkflowManager: workflowManager, + LaunchPlanManager: 
launchPlanManager, + ExecutionManager: executionManager, + NamedEntityManager: namedEntityManager, + DescriptionEntityManager: descriptionEntityManager, + VersionManager: versionManager, + NodeExecutionManager: nodeExecutionManager, + TaskExecutionManager: taskExecutionManager, + ProjectManager: manager.NewProjectManager(repo, configuration), + ResourceManager: resources.NewResourceManager(repo, configuration.ApplicationConfiguration()), + MetricsManager: manager.NewMetricsManager(workflowManager, executionManager, nodeExecutionManager, + taskExecutionManager, adminScope.NewSubScope("metrics_manager")), + Metrics: InitMetrics(adminScope), } } diff --git a/pkg/rpc/adminservice/description_entity.go b/pkg/rpc/adminservice/description_entity.go new file mode 100644 index 000000000..72a9854fe --- /dev/null +++ b/pkg/rpc/adminservice/description_entity.go @@ -0,0 +1,53 @@ +package adminservice + +import ( + "context" + + "github.com/flyteorg/flyteadmin/pkg/rpc/adminservice/util" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytestdlib/logger" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (m *AdminService) GetDescriptionEntity(ctx context.Context, request *admin.ObjectGetRequest) (*admin.DescriptionEntity, error) { + defer m.interceptPanic(ctx, request) + if request == nil { + return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") + } + // NOTE: When the Get HTTP endpoint is called the resource type is implicit (from the URL) so we must add it + // to the request. + if request.Id != nil && request.Id.ResourceType == core.ResourceType_UNSPECIFIED { + logger.Infof(ctx, "Adding resource type for unspecified value in request: [%+v]", request) + request.Id.ResourceType = core.ResourceType_TASK + } + var response *admin.DescriptionEntity + var err error + m.Metrics.descriptionEntityMetrics.get.Time(func() { + response, err = m.DescriptionEntityManager.GetDescriptionEntity(ctx, *request) + }) + if err != nil { + return nil, util.TransformAndRecordError(err, &m.Metrics.descriptionEntityMetrics.get) + } + m.Metrics.descriptionEntityMetrics.get.Success() + return response, nil +} + +func (m *AdminService) ListDescriptionEntities(ctx context.Context, request *admin.DescriptionEntityListRequest) (*admin.DescriptionEntityList, error) { + defer m.interceptPanic(ctx, request) + if request == nil { + return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") + } + var response *admin.DescriptionEntityList + var err error + m.Metrics.descriptionEntityMetrics.list.Time(func() { + response, err = m.DescriptionEntityManager.ListDescriptionEntity(ctx, *request) + }) + if err != nil { + return nil, util.TransformAndRecordError(err, &m.Metrics.descriptionEntityMetrics.list) + } + m.Metrics.descriptionEntityMetrics.list.Success() + return response, nil +} diff --git a/pkg/rpc/adminservice/execution.go b/pkg/rpc/adminservice/execution.go index 57680a58f..87e2fe487 100644 --- a/pkg/rpc/adminservice/execution.go +++ b/pkg/rpc/adminservice/execution.go @@ -131,7 +131,7 @@ func (m *AdminService) GetExecutionData( } var response *admin.WorkflowExecutionGetDataResponse var err error - m.Metrics.executionEndpointMetrics.get.Time(func() { + m.Metrics.executionEndpointMetrics.getData.Time(func() { response, err = m.ExecutionManager.GetExecutionData(ctx, *request) }) if err != nil { @@ -141,6 +141,24 @@ func (m *AdminService) 
GetExecutionData( return response, nil } +func (m *AdminService) GetExecutionMetrics( + ctx context.Context, request *admin.WorkflowExecutionGetMetricsRequest) (*admin.WorkflowExecutionGetMetricsResponse, error) { + defer m.interceptPanic(ctx, request) + if request == nil { + return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") + } + var response *admin.WorkflowExecutionGetMetricsResponse + var err error + m.Metrics.executionEndpointMetrics.getMetrics.Time(func() { + response, err = m.MetricsManager.GetExecutionMetrics(ctx, *request) + }) + if err != nil { + return nil, util.TransformAndRecordError(err, &m.Metrics.executionEndpointMetrics.getMetrics) + } + m.Metrics.executionEndpointMetrics.getMetrics.Success() + return response, nil +} + func (m *AdminService) ListExecutions( ctx context.Context, request *admin.ResourceListRequest) (*admin.ExecutionList, error) { defer m.interceptPanic(ctx, request) diff --git a/pkg/rpc/adminservice/metrics.go b/pkg/rpc/adminservice/metrics.go index c4f40211f..b2bab4514 100644 --- a/pkg/rpc/adminservice/metrics.go +++ b/pkg/rpc/adminservice/metrics.go @@ -17,6 +17,7 @@ type executionEndpointMetrics struct { get util.RequestMetrics update util.RequestMetrics getData util.RequestMetrics + getMetrics util.RequestMetrics list util.RequestMetrics terminate util.RequestMetrics } @@ -47,6 +48,7 @@ type nodeExecutionEndpointMetrics struct { createEvent util.RequestMetrics get util.RequestMetrics getData util.RequestMetrics + getMetrics util.RequestMetrics list util.RequestMetrics listChildren util.RequestMetrics } @@ -95,6 +97,14 @@ type workflowEndpointMetrics struct { listIds util.RequestMetrics } +type descriptionEntityEndpointMetrics struct { + scope promutils.Scope + + create util.RequestMetrics + get util.RequestMetrics + list util.RequestMetrics +} + type AdminMetrics struct { Scope promutils.Scope PanicCounter prometheus.Counter @@ -111,6 +121,7 @@ type AdminMetrics struct { taskEndpointMetrics taskEndpointMetrics taskExecutionEndpointMetrics taskExecutionEndpointMetrics workflowEndpointMetrics workflowEndpointMetrics + descriptionEntityMetrics descriptionEntityEndpointMetrics } func InitMetrics(adminScope promutils.Scope) AdminMetrics { @@ -128,6 +139,7 @@ func InitMetrics(adminScope promutils.Scope) AdminMetrics { get: util.NewRequestMetrics(adminScope, "get_execution"), update: util.NewRequestMetrics(adminScope, "update_execution"), getData: util.NewRequestMetrics(adminScope, "get_execution_data"), + getMetrics: util.NewRequestMetrics(adminScope, "get_execution_metrics"), list: util.NewRequestMetrics(adminScope, "list_execution"), terminate: util.NewRequestMetrics(adminScope, "terminate_execution"), }, @@ -152,6 +164,7 @@ func InitMetrics(adminScope promutils.Scope) AdminMetrics { createEvent: util.NewRequestMetrics(adminScope, "create_node_execution_event"), get: util.NewRequestMetrics(adminScope, "get_node_execution"), getData: util.NewRequestMetrics(adminScope, "get_node_execution_data"), + getMetrics: util.NewRequestMetrics(adminScope, "get_node_execution_metrics"), list: util.NewRequestMetrics(adminScope, "list_node_execution"), listChildren: util.NewRequestMetrics(adminScope, "list_children_node_executions"), }, @@ -204,5 +217,12 @@ func InitMetrics(adminScope promutils.Scope) AdminMetrics { list: util.NewRequestMetrics(adminScope, "list_workflow"), listIds: util.NewRequestMetrics(adminScope, "list_workflow_ids"), }, + + descriptionEntityMetrics: descriptionEntityEndpointMetrics{ + scope: adminScope, + 
create: util.NewRequestMetrics(adminScope, "create_description_entity"), + get: util.NewRequestMetrics(adminScope, "get_description_entity"), + list: util.NewRequestMetrics(adminScope, "list_description_entity"), + }, } } diff --git a/pkg/rpc/signal_service.go b/pkg/rpc/signal_service.go new file mode 100644 index 000000000..2487003d9 --- /dev/null +++ b/pkg/rpc/signal_service.go @@ -0,0 +1,147 @@ +package rpc + +import ( + "context" + "fmt" + "runtime/debug" + + manager "github.com/flyteorg/flyteadmin/pkg/manager/impl" + "github.com/flyteorg/flyteadmin/pkg/manager/interfaces" + "github.com/flyteorg/flyteadmin/pkg/repositories" + "github.com/flyteorg/flyteadmin/pkg/repositories/errors" + "github.com/flyteorg/flyteadmin/pkg/rpc/adminservice/util" + runtimeIfaces "github.com/flyteorg/flyteadmin/pkg/runtime/interfaces" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/service" + + "github.com/flyteorg/flytestdlib/logger" + "github.com/flyteorg/flytestdlib/promutils" + + "github.com/golang/protobuf/proto" + + "github.com/prometheus/client_golang/prometheus" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type SignalMetrics struct { + scope promutils.Scope + panicCounter prometheus.Counter + + create util.RequestMetrics + get util.RequestMetrics +} + +func NewSignalMetrics(scope promutils.Scope) SignalMetrics { + return SignalMetrics{ + scope: scope, + panicCounter: scope.MustNewCounter("handler_panic", + "panics encountered while handling requests to the admin service"), + create: util.NewRequestMetrics(scope, "create_signal"), + get: util.NewRequestMetrics(scope, "get_signal"), + } +} + +type SignalService struct { + service.UnimplementedSignalServiceServer + signalManager interfaces.SignalInterface + metrics SignalMetrics +} + +func NewSignalServer(ctx context.Context, configuration runtimeIfaces.Configuration, adminScope promutils.Scope) *SignalService { + panicCounter := adminScope.MustNewCounter("initialization_panic", + "panics encountered initializing the signal service") + + defer func() { + if err := recover(); err != nil { + panicCounter.Inc() + logger.Fatalf(ctx, fmt.Sprintf("caught panic: %v [%+v]", err, string(debug.Stack()))) + } + }() + + databaseConfig := configuration.ApplicationConfiguration().GetDbConfig() + logConfig := logger.GetConfig() + + db, err := repositories.GetDB(ctx, databaseConfig, logConfig) + if err != nil { + logger.Fatal(ctx, err) + } + dbScope := adminScope.NewSubScope("database") + repo := repositories.NewGormRepo( + db, errors.NewPostgresErrorTransformer(adminScope.NewSubScope("errors")), dbScope) + + signalManager := manager.NewSignalManager(repo, adminScope.NewSubScope("signal_manager")) + + logger.Info(ctx, "Initializing a new SignalService") + return &SignalService{ + signalManager: signalManager, + metrics: NewSignalMetrics(adminScope), + } +} + +// Intercepts all admin requests to handle panics during execution. 
+func (s *SignalService) interceptPanic(ctx context.Context, request proto.Message) { + err := recover() + if err == nil { + return + } + + s.metrics.panicCounter.Inc() + logger.Fatalf(ctx, "panic-ed for request: [%+v] with err: %v with Stack: %v", request, err, string(debug.Stack())) +} + +func (s *SignalService) GetOrCreateSignal( + ctx context.Context, request *admin.SignalGetOrCreateRequest) (*admin.Signal, error) { + defer s.interceptPanic(ctx, request) + if request == nil { + return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") + } + var response *admin.Signal + var err error + s.metrics.create.Time(func() { + response, err = s.signalManager.GetOrCreateSignal(ctx, *request) + }) + if err != nil { + return nil, util.TransformAndRecordError(err, &s.metrics.create) + } + s.metrics.create.Success() + return response, nil +} + +func (s *SignalService) ListSignals( + ctx context.Context, request *admin.SignalListRequest) (*admin.SignalList, error) { + defer s.interceptPanic(ctx, request) + if request == nil { + return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") + } + var response *admin.SignalList + var err error + s.metrics.get.Time(func() { + response, err = s.signalManager.ListSignals(ctx, *request) + }) + if err != nil { + return nil, util.TransformAndRecordError(err, &s.metrics.get) + } + s.metrics.get.Success() + return response, nil +} + +func (s *SignalService) SetSignal( + ctx context.Context, request *admin.SignalSetRequest) (*admin.SignalSetResponse, error) { + defer s.interceptPanic(ctx, request) + if request == nil { + return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") + } + var response *admin.SignalSetResponse + var err error + s.metrics.get.Time(func() { + response, err = s.signalManager.SetSignal(ctx, *request) + }) + if err != nil { + return nil, util.TransformAndRecordError(err, &s.metrics.get) + } + s.metrics.get.Success() + return response, nil +} diff --git a/pkg/rpc/signal_service_test.go b/pkg/rpc/signal_service_test.go new file mode 100644 index 000000000..b987edfdd --- /dev/null +++ b/pkg/rpc/signal_service_test.go @@ -0,0 +1,148 @@ +package rpc + +import ( + "context" + "errors" + "testing" + + "github.com/flyteorg/flyteadmin/pkg/manager/mocks" + + "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" + + mockScope "github.com/flyteorg/flytestdlib/promutils" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestGetOrCreateSignal(t *testing.T) { + ctx := context.Background() + + t.Run("Happy", func(t *testing.T) { + signalManager := mocks.SignalInterface{} + signalManager.OnGetOrCreateSignalMatch(mock.Anything, mock.Anything).Return(&admin.Signal{}, nil) + + testScope := mockScope.NewTestScope() + mockServer := &SignalService{ + signalManager: &signalManager, + metrics: NewSignalMetrics(testScope), + } + + _, err := mockServer.GetOrCreateSignal(ctx, &admin.SignalGetOrCreateRequest{}) + assert.NoError(t, err) + }) + + t.Run("NilRequestError", func(t *testing.T) { + signalManager := mocks.SignalInterface{} + testScope := mockScope.NewTestScope() + mockServer := &SignalService{ + signalManager: &signalManager, + metrics: NewSignalMetrics(testScope), + } + + _, err := mockServer.GetOrCreateSignal(ctx, nil) + assert.Error(t, err) + }) + + t.Run("ManagerError", func(t *testing.T) { + signalManager := mocks.SignalInterface{} + signalManager.OnGetOrCreateSignalMatch(mock.Anything, 
mock.Anything).Return(nil, errors.New("foo")) + + testScope := mockScope.NewTestScope() + mockServer := &SignalService{ + signalManager: &signalManager, + metrics: NewSignalMetrics(testScope), + } + + _, err := mockServer.GetOrCreateSignal(ctx, &admin.SignalGetOrCreateRequest{}) + assert.Error(t, err) + }) +} + +func TestListSignals(t *testing.T) { + ctx := context.Background() + + t.Run("Happy", func(t *testing.T) { + signalManager := mocks.SignalInterface{} + signalManager.OnListSignalsMatch(mock.Anything, mock.Anything).Return(&admin.SignalList{}, nil) + + testScope := mockScope.NewTestScope() + mockServer := &SignalService{ + signalManager: &signalManager, + metrics: NewSignalMetrics(testScope), + } + + _, err := mockServer.ListSignals(ctx, &admin.SignalListRequest{}) + assert.NoError(t, err) + }) + + t.Run("NilRequestError", func(t *testing.T) { + signalManager := mocks.SignalInterface{} + testScope := mockScope.NewTestScope() + mockServer := &SignalService{ + signalManager: &signalManager, + metrics: NewSignalMetrics(testScope), + } + + _, err := mockServer.ListSignals(ctx, nil) + assert.Error(t, err) + }) + + t.Run("ManagerError", func(t *testing.T) { + signalManager := mocks.SignalInterface{} + signalManager.OnListSignalsMatch(mock.Anything, mock.Anything).Return(nil, errors.New("foo")) + + testScope := mockScope.NewTestScope() + mockServer := &SignalService{ + signalManager: &signalManager, + metrics: NewSignalMetrics(testScope), + } + + _, err := mockServer.ListSignals(ctx, &admin.SignalListRequest{}) + assert.Error(t, err) + }) +} + +func TestSetSignal(t *testing.T) { + ctx := context.Background() + + t.Run("Happy", func(t *testing.T) { + signalManager := mocks.SignalInterface{} + signalManager.OnSetSignalMatch(mock.Anything, mock.Anything).Return(&admin.SignalSetResponse{}, nil) + + testScope := mockScope.NewTestScope() + mockServer := &SignalService{ + signalManager: &signalManager, + metrics: NewSignalMetrics(testScope), + } + + _, err := mockServer.SetSignal(ctx, &admin.SignalSetRequest{}) + assert.NoError(t, err) + }) + + t.Run("NilRequestError", func(t *testing.T) { + signalManager := mocks.SignalInterface{} + testScope := mockScope.NewTestScope() + mockServer := &SignalService{ + signalManager: &signalManager, + metrics: NewSignalMetrics(testScope), + } + + _, err := mockServer.SetSignal(ctx, nil) + assert.Error(t, err) + }) + + t.Run("ManagerError", func(t *testing.T) { + signalManager := mocks.SignalInterface{} + signalManager.OnSetSignalMatch(mock.Anything, mock.Anything).Return(nil, errors.New("foo")) + + testScope := mockScope.NewTestScope() + mockServer := &SignalService{ + signalManager: &signalManager, + metrics: NewSignalMetrics(testScope), + } + + _, err := mockServer.SetSignal(ctx, &admin.SignalSetRequest{}) + assert.Error(t, err) + }) +} diff --git a/pkg/runtime/interfaces/application_configuration.go b/pkg/runtime/interfaces/application_configuration.go index 21ca5fd7e..16b1f921d 100644 --- a/pkg/runtime/interfaces/application_configuration.go +++ b/pkg/runtime/interfaces/application_configuration.go @@ -1,7 +1,6 @@ package interfaces import ( - "github.com/Shopify/sarama" "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/admin" "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flytestdlib/config" @@ -78,6 +77,11 @@ type ApplicationConfig struct { Annotations map[string]string `json:"annotations,omitempty"` // Interruptible indicates whether all tasks should be run as interruptible by default (unless specified otherwise via the 
execution/workflow/task definition) Interruptible bool `json:"interruptible"` + // OverwriteCache indicates all workflows and tasks should skip all their cached results and re-compute their outputs, + // overwriting any already stored data. + // Note that setting this to `true` effectively disables all caching in Flyte, as all executions launched + // will have their OverwriteCache setting enabled. + OverwriteCache bool `json:"overwriteCache"` // Optional: security context override to apply this execution. // iam_role references the fully qualified name of Identity & Access Management role to impersonate. @@ -158,11 +162,17 @@ func (a *ApplicationConfig) GetInterruptible() *wrappers.BoolValue { } } +func (a *ApplicationConfig) GetOverwriteCache() bool { + return a.OverwriteCache +} + // GetAsWorkflowExecutionConfig returns the WorkflowExecutionConfig as extracted from this object func (a *ApplicationConfig) GetAsWorkflowExecutionConfig() admin.WorkflowExecutionConfig { - // These two should always be set, one is a number, and the other returns nil when empty. + // These values should always be set, as their fallback values equal their zero value or nil, + // providing a sensible default even if the actual value was not set. wec := admin.WorkflowExecutionConfig{ MaxParallelism: a.GetMaxParallelism(), + OverwriteCache: a.GetOverwriteCache(), Interruptible: a.GetInterruptible(), } @@ -194,7 +204,8 @@ type GCPConfig struct { } type KafkaConfig struct { - Version sarama.KafkaVersion + // The version of Kafka, e.g. 2.1.0, 0.8.2.0 + Version string `json:"version"` // kafka broker addresses Brokers []string `json:"brokers"` } @@ -344,12 +355,18 @@ type FlyteWorkflowExecutorConfig struct { // eg : 100 TPS will send at the max 100 schedule requests to admin per sec. // Burst specifies burst traffic count AdminRateLimit *AdminRateLimit `json:"adminRateLimit"` + // Defaults to using the local timezone of the host where the scheduler is deployed. + UseUTCTz bool `json:"useUTCTz"` } func (f *FlyteWorkflowExecutorConfig) GetAdminRateLimit() *AdminRateLimit { return f.AdminRateLimit } +func (f *FlyteWorkflowExecutorConfig) GetUseUTCTz() bool { + return f.UseUTCTz +} + type AdminRateLimit struct { Tps rate.Limit `json:"tps"` Burst int `json:"burst"` diff --git a/pkg/runtime/interfaces/cluster_configuration.go b/pkg/runtime/interfaces/cluster_configuration.go index 100e6f004..491caed3f 100644 --- a/pkg/runtime/interfaces/cluster_configuration.go +++ b/pkg/runtime/interfaces/cluster_configuration.go @@ -3,15 +3,18 @@ package interfaces import ( "io/ioutil" + "github.com/flyteorg/flyteadmin/pkg/config" + "github.com/pkg/errors" ) // Holds details about a cluster used for workflow execution. type ClusterConfig struct { - Name string `json:"name"` - Endpoint string `json:"endpoint"` - Auth Auth `json:"auth"` - Enabled bool `json:"enabled"` + Name string `json:"name"` + Endpoint string `json:"endpoint"` + Auth Auth `json:"auth"` + Enabled bool `json:"enabled"` + KubeClientConfig *config.KubeClientConfig `json:"kubeClientConfig,omitempty"` } type Auth struct { diff --git a/pkg/runtime/interfaces/inlineeventdatapolicy_enumer.go b/pkg/runtime/interfaces/inlineeventdatapolicy_enumer.go index 63ff94e89..7c3895b2c 100644 --- a/pkg/runtime/interfaces/inlineeventdatapolicy_enumer.go +++ b/pkg/runtime/interfaces/inlineeventdatapolicy_enumer.go @@ -1,6 +1,5 @@ // Code generated by "enumer -type=InlineEventDataPolicy -trimprefix=InlineEventDataPolicy"; DO NOT EDIT.
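A minimal sketch, not part of this diff, of how the operator-level overwriteCache default added above is expected to surface on launched executions; the struct literal and the printed value are illustrative assumptions rather than code from this change:

package main

import (
	"fmt"

	"github.com/flyteorg/flyteadmin/pkg/runtime/interfaces"
)

func main() {
	// Hypothetical operator default, e.g. supplied through flyteadmin's application configuration.
	appCfg := interfaces.ApplicationConfig{OverwriteCache: true}

	// GetAsWorkflowExecutionConfig copies the flag onto the per-execution
	// WorkflowExecutionConfig, so executions launched with these defaults
	// recompute and overwrite any previously cached outputs.
	wec := appCfg.GetAsWorkflowExecutionConfig()
	fmt.Println(wec.OverwriteCache) // true
}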
-// package interfaces import ( diff --git a/pkg/server/service.go b/pkg/server/service.go index 85af63bd5..4f1f58ffb 100644 --- a/pkg/server/service.go +++ b/pkg/server/service.go @@ -25,6 +25,7 @@ import ( "github.com/flyteorg/flyteadmin/auth/interfaces" "github.com/flyteorg/flyteadmin/pkg/common" "github.com/flyteorg/flyteadmin/pkg/config" + "github.com/flyteorg/flyteadmin/pkg/rpc" "github.com/flyteorg/flyteadmin/pkg/rpc/adminservice" runtimeIfaces "github.com/flyteorg/flyteadmin/pkg/runtime/interfaces" "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/service" @@ -111,13 +112,14 @@ func newGRPCServer(ctx context.Context, pluginRegistry *plugins.Registry, cfg *c } configuration := runtime2.NewConfigurationProvider() - service.RegisterAdminServiceServer(grpcServer, adminservice.NewAdminServer(ctx, pluginRegistry, configuration, cfg.KubeConfig, cfg.Master, dataStorageClient, scope.NewSubScope("admin"))) + adminServer := adminservice.NewAdminServer(ctx, pluginRegistry, configuration, cfg.KubeConfig, cfg.Master, dataStorageClient, scope.NewSubScope("admin")) + service.RegisterAdminServiceServer(grpcServer, adminServer) if cfg.Security.UseAuth { service.RegisterAuthMetadataServiceServer(grpcServer, authCtx.AuthMetadataService()) service.RegisterIdentityServiceServer(grpcServer, authCtx.IdentityService()) } - dataProxySvc, err := dataproxy.NewService(cfg.DataProxy, dataStorageClient) + dataProxySvc, err := dataproxy.NewService(cfg.DataProxy, adminServer.NodeExecutionManager, dataStorageClient) if err != nil { return nil, fmt.Errorf("failed to initialize dataProxy service. Error: %w", err) } @@ -125,6 +127,8 @@ func newGRPCServer(ctx context.Context, pluginRegistry *plugins.Registry, cfg *c pluginRegistry.RegisterDefault(plugins.PluginIDDataProxy, dataProxySvc) service.RegisterDataProxyServiceServer(grpcServer, plugins.Get[service.DataProxyServiceServer](pluginRegistry, plugins.PluginIDDataProxy)) + service.RegisterSignalServiceServer(grpcServer, rpc.NewSignalServer(ctx, configuration, scope.NewSubScope("signal"))) + healthServer := health.NewServer() healthServer.SetServingStatus("flyteadmin", grpc_health_v1.HealthCheckResponse_SERVING) grpc_health_v1.RegisterHealthServer(grpcServer, healthServer) @@ -178,6 +182,9 @@ func newHTTPServer(ctx context.Context, cfg *config.ServerConfig, _ *authConfig. // This option means that http requests are served with protobufs, instead of json. We always want this. gwmuxOptions = append(gwmuxOptions, runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{})) + // This option sets subject in the user info response + gwmuxOptions = append(gwmuxOptions, runtime.WithForwardResponseOption(auth.GetUserInfoForwardResponseHandler())) + if cfg.Security.UseAuth { // Add HTTP handlers for OIDC endpoints auth.RegisterHandlers(ctx, mux, authCtx) @@ -216,6 +223,11 @@ func newHTTPServer(ctx context.Context, cfg *config.ServerConfig, _ *authConfig. 
return nil, errors.Wrap(err, "error registering data proxy service") } + err = service.RegisterSignalServiceHandlerFromEndpoint(ctx, gwmux, grpcAddress, grpcConnectionOpts) + if err != nil { + return nil, errors.Wrap(err, "error registering signal service") + } + mux.Handle("/", gwmux) return mux, nil diff --git a/pkg/workflowengine/impl/prepare_execution.go b/pkg/workflowengine/impl/prepare_execution.go index 43499922b..f2a778e27 100644 --- a/pkg/workflowengine/impl/prepare_execution.go +++ b/pkg/workflowengine/impl/prepare_execution.go @@ -57,10 +57,13 @@ func addExecutionOverrides(taskPluginOverrides []*admin.PluginOverride, } if workflowExecutionConfig != nil { executionConfig.MaxParallelism = uint32(workflowExecutionConfig.MaxParallelism) + if workflowExecutionConfig.GetInterruptible() != nil { interruptible := workflowExecutionConfig.GetInterruptible().GetValue() executionConfig.Interruptible = &interruptible } + + executionConfig.OverwriteCache = workflowExecutionConfig.GetOverwriteCache() } if taskResources != nil { var requests = v1alpha1.TaskResourceSpec{} diff --git a/pkg/workflowengine/impl/prepare_execution_test.go b/pkg/workflowengine/impl/prepare_execution_test.go index bba2b1066..38e155636 100644 --- a/pkg/workflowengine/impl/prepare_execution_test.go +++ b/pkg/workflowengine/impl/prepare_execution_test.go @@ -158,6 +158,14 @@ func TestAddExecutionOverrides(t *testing.T) { assert.NotNil(t, workflow.ExecutionConfig.Interruptible) assert.True(t, *workflow.ExecutionConfig.Interruptible) }) + t.Run("skip cache", func(t *testing.T) { + workflowExecutionConfig := &admin.WorkflowExecutionConfig{ + OverwriteCache: true, + } + workflow := &v1alpha1.FlyteWorkflow{} + addExecutionOverrides(nil, workflowExecutionConfig, nil, nil, workflow) + assert.True(t, workflow.ExecutionConfig.OverwriteCache) + }) } func TestPrepareFlyteWorkflow(t *testing.T) { diff --git a/scheduler/core/gocron_scheduler.go b/scheduler/core/gocron_scheduler.go index 15ca50f8c..a9d068b06 100644 --- a/scheduler/core/gocron_scheduler.go +++ b/scheduler/core/gocron_scheduler.go @@ -327,9 +327,13 @@ func getFixedRateDurationFromSchedule(unit admin.FixedRateUnit, fixedRateValue u } func NewGoCronScheduler(ctx context.Context, schedules []models.SchedulableEntity, scope promutils.Scope, - snapshot snapshoter.Snapshot, rateLimiter *rate.Limiter, executor executor.Executor) Scheduler { + snapshot snapshoter.Snapshot, rateLimiter *rate.Limiter, executor executor.Executor, useUtcTz bool) Scheduler { // Create the new cron scheduler and start it off - c := cron.New() + var opts []cron.Option + if useUtcTz { + opts = append(opts, cron.WithLocation(time.UTC)) + } + c := cron.New(opts...) 
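// A brief sketch, not part of this change: with useUtcTz enabled, robfig/cron evaluates
// schedules against UTC instead of its default time.Local, so an entry such as
//
//	c := cron.New(cron.WithLocation(time.UTC))
//	_, _ = c.AddFunc("0 9 * * *", job) // "job" is a hypothetical func(); fires at 09:00 UTC on any host
//	loc := c.Location()                // reports time.UTC once the option is applied
//
// behaves identically regardless of the timezone of the machine running the scheduler.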
c.Start() scheduler := &GoCronScheduler{ cron: c, diff --git a/scheduler/core/gocron_scheduler_test.go b/scheduler/core/gocron_scheduler_test.go index afc60be72..ce4f7187c 100644 --- a/scheduler/core/gocron_scheduler_test.go +++ b/scheduler/core/gocron_scheduler_test.go @@ -25,7 +25,7 @@ var scheduleCronDeactivated models.SchedulableEntity var scheduleFixedDeactivated models.SchedulableEntity var scheduleNonExistentDeActivated models.SchedulableEntity -func setup(t *testing.T, subscope string) *GoCronScheduler { +func setup(t *testing.T, subscope string, useUtcTz bool) *GoCronScheduler { configuration := runtime.NewConfigurationProvider() applicationConfiguration := configuration.ApplicationConfiguration().GetTopLevelConfig() schedulerScope := promutils.NewScope(applicationConfiguration.MetricsScope).NewSubScope(subscope) @@ -109,7 +109,7 @@ func setup(t *testing.T, subscope string) *GoCronScheduler { executor.OnExecuteMatch(mock.Anything, mock.Anything, mock.Anything).Return(nil) snapshot := &snapshoter.SnapshotV1{} - g := NewGoCronScheduler(context.Background(), schedules, schedulerScope, snapshot, rateLimiter, executor) + g := NewGoCronScheduler(context.Background(), schedules, schedulerScope, snapshot, rateLimiter, executor, useUtcTz) goCronScheduler, ok := g.(*GoCronScheduler) assert.True(t, ok) goCronScheduler.UpdateSchedules(context.Background(), schedules) @@ -118,17 +118,31 @@ func setup(t *testing.T, subscope string) *GoCronScheduler { return goCronScheduler } +func TestUseUTCTz(t *testing.T) { + t.Run("use local timezone", func(t *testing.T) { + g := setup(t, "use_local_tz", false) + loc := g.cron.Location() + assert.NotNil(t, loc) + assert.Equal(t, time.Local, loc) + }) + t.Run("use utc timezone", func(t *testing.T) { + g := setup(t, "use_utc_tz", true) + loc := g.cron.Location() + assert.NotNil(t, loc) + assert.Equal(t, time.UTC, loc) + }) +} func TestCalculateSnapshot(t *testing.T) { t.Run("empty snapshot", func(t *testing.T) { ctx := context.Background() - g := setup(t, "empty_snapshot") + g := setup(t, "empty_snapshot", false) snapshot := g.CalculateSnapshot(ctx) assert.NotNil(t, snapshot) assert.True(t, snapshot.IsEmpty()) }) t.Run("non empty snapshot", func(t *testing.T) { ctx := context.Background() - g := setup(t, "non_empty_snapshot") + g := setup(t, "non_empty_snapshot", false) g.jobStore.Range(func(key, value interface{}) bool { currTime := time.Now() job := value.(*GoCronJob) @@ -155,7 +169,7 @@ func TestGetTimedFuncWithSchedule(t *testing.T) { } for _, tc := range tests { ctx := context.Background() - g := setup(t, tc.scope) + g := setup(t, tc.scope, false) timeFunc := g.GetTimedFuncWithSchedule() assert.NotNil(t, timeFunc) err := timeFunc(ctx, tc.input, time.Now()) @@ -164,7 +178,7 @@ func TestGetTimedFuncWithSchedule(t *testing.T) { }) t.Run("failure case", func(t *testing.T) { ctx := context.Background() - g := setup(t, "failure_case") + g := setup(t, "failure_case", false) executor := new(mocks.Executor) executor.OnExecuteMatch(mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("failure case")) g.executor = executor @@ -271,7 +285,7 @@ func TestGetFixedRateDurationFromSchedule(t *testing.T) { func TestCatchUpAllSchedule(t *testing.T) { ctx := context.Background() - g := setup(t, "catch_up_all_schedules") + g := setup(t, "catch_up_all_schedules", false) toTime := time.Date(2022, time.January, 29, 0, 0, 0, 0, time.UTC) catchupSuccess := g.CatchupAll(ctx, toTime) assert.True(t, catchupSuccess) diff --git a/scheduler/schedule_executor.go 
b/scheduler/schedule_executor.go index eba1886fe..9a4afbe07 100644 --- a/scheduler/schedule_executor.go +++ b/scheduler/schedule_executor.go @@ -68,7 +68,8 @@ func (w *ScheduledExecutor) Run(ctx context.Context) error { // Also Bootstrap the schedules from the snapshot bootStrapCtx, bootStrapCancel := context.WithCancel(ctx) defer bootStrapCancel() - gcronScheduler := core.NewGoCronScheduler(bootStrapCtx, schedules, w.scope, snapshot, rateLimiter, executor) + useUtcTz := w.workflowExecutorConfig.UseUTCTz + gcronScheduler := core.NewGoCronScheduler(bootStrapCtx, schedules, w.scope, snapshot, rateLimiter, executor, useUtcTz) w.scheduler = gcronScheduler // Start the go routine to write the update schedules periodically diff --git a/tests/bootstrap.go b/tests/bootstrap.go index e9804aa70..b5a3477b7 100644 --- a/tests/bootstrap.go +++ b/tests/bootstrap.go @@ -65,6 +65,7 @@ func truncateAllTablesForTestingOnly() { TruncateExecutionEvents := fmt.Sprintf("TRUNCATE TABLE execution_events;") TruncateNamedEntityMetadata := fmt.Sprintf("TRUNCATE TABLE named_entity_metadata;") + TruncateDescriptionEntity := fmt.Sprintf("TRUNCATE TABLE description_entities;") TruncateNodeExecutions := fmt.Sprintf("TRUNCATE TABLE node_executions;") TruncateNodeExecutionEvents := fmt.Sprintf("TRUNCATE TABLE node_execution_events;") TruncateTaskExecutions := fmt.Sprintf("TRUNCATE TABLE task_executions;") @@ -92,6 +93,7 @@ func truncateAllTablesForTestingOnly() { db.Exec(TruncateExecutions) db.Exec(TruncateExecutionEvents) db.Exec(TruncateNamedEntityMetadata) + db.Exec(TruncateDescriptionEntity) db.Exec(TruncateNodeExecutions) db.Exec(TruncateNodeExecutionEvents) db.Exec(TruncateTaskExecutions) diff --git a/tests/node_execution_test.go b/tests/node_execution_test.go index 09050f7b7..b3d17658c 100644 --- a/tests/node_execution_test.go +++ b/tests/node_execution_test.go @@ -42,9 +42,11 @@ func TestCreateNodeExecution(t *testing.T) { _, err := client.CreateNodeEvent(ctx, &admin.NodeExecutionEventRequest{ RequestId: "request id", Event: &event.NodeExecutionEvent{ - Id: nodeExecutionId, - Phase: core.NodeExecution_RUNNING, - InputUri: inputURI, + Id: nodeExecutionId, + Phase: core.NodeExecution_RUNNING, + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: inputURI, + }, OccurredAt: occurredAtProto, }, }) @@ -73,9 +75,11 @@ func TestCreateNodeExecutionWithParent(t *testing.T) { _, err := client.CreateNodeEvent(ctx, &admin.NodeExecutionEventRequest{ RequestId: "request id", Event: &event.NodeExecutionEvent{ - Id: nodeExecutionId, - Phase: core.NodeExecution_RUNNING, - InputUri: inputURI, + Id: nodeExecutionId, + Phase: core.NodeExecution_RUNNING, + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: inputURI, + }, OccurredAt: occurredAtProto, }, }) @@ -95,8 +99,10 @@ func TestCreateNodeExecutionWithParent(t *testing.T) { NodeId: "child", ExecutionId: nodeExecutionId.ExecutionId, }, - Phase: core.NodeExecution_RUNNING, - InputUri: inputURI, + Phase: core.NodeExecution_RUNNING, + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: inputURI, + }, OccurredAt: occurredAtProto, SpecNodeId: "spec", RetryGroup: "1", @@ -140,9 +146,11 @@ func TestCreateAndUpdateNodeExecution(t *testing.T) { _, err := client.CreateNodeEvent(ctx, &admin.NodeExecutionEventRequest{ RequestId: "request id", Event: &event.NodeExecutionEvent{ - Id: nodeExecutionId, - Phase: core.NodeExecution_RUNNING, - InputUri: inputURI, + Id: nodeExecutionId, + Phase: core.NodeExecution_RUNNING, + InputValue: &event.NodeExecutionEvent_InputUri{ + 
InputUri: inputURI, + }, OccurredAt: beganRunningAtProto, }, }) @@ -162,9 +170,11 @@ func TestCreateAndUpdateNodeExecution(t *testing.T) { _, err = client.CreateNodeEvent(ctx, &admin.NodeExecutionEventRequest{ RequestId: "other request id", Event: &event.NodeExecutionEvent{ - Id: otherNodeExecutionID, - Phase: core.NodeExecution_QUEUED, - InputUri: inputURI, + Id: otherNodeExecutionID, + Phase: core.NodeExecution_QUEUED, + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: inputURI, + }, OccurredAt: otherBeganRunningAtProto, }, }) @@ -176,9 +186,11 @@ func TestCreateAndUpdateNodeExecution(t *testing.T) { _, err = client.CreateNodeEvent(ctx, &admin.NodeExecutionEventRequest{ RequestId: "request id", Event: &event.NodeExecutionEvent{ - Id: nodeExecutionId, - Phase: core.NodeExecution_SUCCEEDED, - InputUri: inputURI, + Id: nodeExecutionId, + Phase: core.NodeExecution_SUCCEEDED, + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: inputURI, + }, OccurredAt: succeededAtProto, OutputResult: &event.NodeExecutionEvent_OutputUri{ OutputUri: outputURI, @@ -232,9 +244,11 @@ func TestCreateAndListNodeExecutions(t *testing.T) { _, err = client.CreateNodeEvent(ctx, &admin.NodeExecutionEventRequest{ RequestId: "request id", Event: &event.NodeExecutionEvent{ - Id: nodeExecutionId, - Phase: core.NodeExecution_RUNNING, - InputUri: inputURI, + Id: nodeExecutionId, + Phase: core.NodeExecution_RUNNING, + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: inputURI, + }, OccurredAt: occurredAtProto, }, }) @@ -271,9 +285,11 @@ func TestListNodeExecutionWithParent(t *testing.T) { _, err := client.CreateNodeEvent(ctx, &admin.NodeExecutionEventRequest{ RequestId: "request id", Event: &event.NodeExecutionEvent{ - Id: nodeExecutionId, - Phase: core.NodeExecution_RUNNING, - InputUri: inputURI, + Id: nodeExecutionId, + Phase: core.NodeExecution_RUNNING, + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: inputURI, + }, OccurredAt: occurredAtProto, }, }) @@ -286,8 +302,10 @@ func TestListNodeExecutionWithParent(t *testing.T) { NodeId: "child", ExecutionId: nodeExecutionId.ExecutionId, }, - Phase: core.NodeExecution_RUNNING, - InputUri: inputURI, + Phase: core.NodeExecution_RUNNING, + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: inputURI, + }, OccurredAt: occurredAtProto, SpecNodeId: "spec", RetryGroup: "1", @@ -305,8 +323,10 @@ func TestListNodeExecutionWithParent(t *testing.T) { NodeId: "child2", ExecutionId: nodeExecutionId.ExecutionId, }, - Phase: core.NodeExecution_RUNNING, - InputUri: inputURI, + Phase: core.NodeExecution_RUNNING, + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: inputURI, + }, OccurredAt: occurredAtProto, SpecNodeId: "spec", RetryGroup: "1", @@ -403,9 +423,11 @@ func TestCreateChildNodeExecutionForTaskExecution(t *testing.T) { _, err = client.CreateNodeEvent(ctx, &admin.NodeExecutionEventRequest{ RequestId: "request id", Event: &event.NodeExecutionEvent{ - Id: childNodeExecutionID, - Phase: core.NodeExecution_RUNNING, - InputUri: inputURI, + Id: childNodeExecutionID, + Phase: core.NodeExecution_RUNNING, + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: inputURI, + }, OccurredAt: childOccurredAtProto, ParentTaskMetadata: &event.ParentTaskExecutionMetadata{ Id: taskExecutionIdentifier, diff --git a/tests/task_execution_test.go b/tests/task_execution_test.go index 2b7de0f80..508bc8ccd 100644 --- a/tests/task_execution_test.go +++ b/tests/task_execution_test.go @@ -60,9 +60,11 @@ func createTaskAndNodeExecution( _, err = 
client.CreateNodeEvent(ctx, &admin.NodeExecutionEventRequest{ RequestId: "request id", Event: &event.NodeExecutionEvent{ - Id: nodeExecutionId, - Phase: core.NodeExecution_RUNNING, - InputUri: inputURI, + Id: nodeExecutionId, + Phase: core.NodeExecution_RUNNING, + InputValue: &event.NodeExecutionEvent_InputUri{ + InputUri: inputURI, + }, OccurredAt: occurredAtProto, }, }) @@ -90,7 +92,9 @@ func TestCreateTaskExecution(t *testing.T) { Phase: core.TaskExecution_RUNNING, RetryAttempt: 1, OccurredAt: occurredAtProto, - InputUri: taskExecInputURI, + InputValue: &event.TaskExecutionEvent_InputUri{ + InputUri: taskExecInputURI, + }, }, }) assert.Nil(t, err) @@ -124,8 +128,10 @@ func TestCreateAndUpdateTaskExecution(t *testing.T) { ParentNodeExecutionId: nodeExecutionId, Phase: core.TaskExecution_FAILED, OccurredAt: beganAtProto, - InputUri: taskExecInputURI, - RetryAttempt: 0, + InputValue: &event.TaskExecutionEvent_InputUri{ + InputUri: taskExecInputURI, + }, + RetryAttempt: 0, }, }) assert.Nil(t, err) @@ -147,7 +153,9 @@ func TestCreateAndUpdateTaskExecution(t *testing.T) { Phase: core.TaskExecution_RUNNING, RetryAttempt: 1, OccurredAt: beganAtProto, - InputUri: taskExecInputURI, + InputValue: &event.TaskExecutionEvent_InputUri{ + InputUri: taskExecInputURI, + }, }, }) assert.Nil(t, err) @@ -166,7 +174,9 @@ func TestCreateAndUpdateTaskExecution(t *testing.T) { Phase: core.TaskExecution_SUCCEEDED, RetryAttempt: 1, OccurredAt: endedAtProto, - InputUri: taskExecInputURI, + InputValue: &event.TaskExecutionEvent_InputUri{ + InputUri: taskExecInputURI, + }, }, }) assert.Nil(t, err) @@ -204,8 +214,10 @@ func TestCreateAndUpdateTaskExecutionPhaseVersion(t *testing.T) { ParentNodeExecutionId: nodeExecutionId, Phase: core.TaskExecution_RUNNING, OccurredAt: beganAtProto, - InputUri: taskExecInputURI, - RetryAttempt: 0, + InputValue: &event.TaskExecutionEvent_InputUri{ + InputUri: taskExecInputURI, + }, + RetryAttempt: 0, }, }) assert.Nil(t, err) @@ -237,9 +249,11 @@ func TestCreateAndUpdateTaskExecutionPhaseVersion(t *testing.T) { Phase: core.TaskExecution_RUNNING, PhaseVersion: 1, OccurredAt: beganAtProto, - InputUri: taskExecInputURI, - RetryAttempt: 0, - CustomInfo: &customInfo, + InputValue: &event.TaskExecutionEvent_InputUri{ + InputUri: taskExecInputURI, + }, + RetryAttempt: 0, + CustomInfo: &customInfo, }, }) assert.Nil(t, err) @@ -275,7 +289,9 @@ func TestCreateAndListTaskExecution(t *testing.T) { Phase: core.TaskExecution_RUNNING, RetryAttempt: 1, OccurredAt: occurredAtProto, - InputUri: taskExecInputURI, + InputValue: &event.TaskExecutionEvent_InputUri{ + InputUri: taskExecInputURI, + }, }, }) assert.Nil(t, err) @@ -388,7 +404,9 @@ func TestGetTaskExecutionData(t *testing.T) { ParentNodeExecutionId: nodeExecutionId, Phase: core.TaskExecution_SUCCEEDED, OccurredAt: beganAtProto, - InputUri: taskExecInputURI, + InputValue: &event.TaskExecutionEvent_InputUri{ + InputUri: taskExecInputURI, + }, OutputResult: &event.TaskExecutionEvent_OutputUri{ OutputUri: "s3://flyte/metadata/admin/output/uri", },
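The event tests above all switch to the new InputValue oneof. A minimal sketch, assuming only the flyteidl event and core packages already imported throughout this diff, of the two ways a producer can now report task inputs (the URI below is illustrative):

package main

import (
	"github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/core"
	"github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/event"
)

func main() {
	// Reference form: point flyteadmin at inputs that are already stored somewhere.
	byURI := &event.TaskExecutionEvent{
		Phase: core.TaskExecution_RUNNING,
		InputValue: &event.TaskExecutionEvent_InputUri{
			InputUri: "s3://my-bucket/metadata/inputs.pb", // illustrative location
		},
	}

	// Inline form: send the literal map itself; handleTaskExecutionInputs then offloads
	// it to blob storage and records the resulting URI on the task execution model.
	inline := &event.TaskExecutionEvent{
		Phase: core.TaskExecution_QUEUED,
		InputValue: &event.TaskExecutionEvent_InputData{
			InputData: &core.LiteralMap{},
		},
	}

	_, _ = byURI, inline
}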