diff --git a/server/application/application.go b/server/application/application.go index fe9697dc77056e..9434f919649f7b 100644 --- a/server/application/application.go +++ b/server/application/application.go @@ -1569,8 +1569,13 @@ func (s *Server) PodLogs(q *application.ApplicationPodLogsQuery, ws application. return nil } - if len(pods) > maxPodLogsToRender { - return errors.New("Max pods to view logs are reached. Please provide more granular query.") + maxPodLogsToRender, err := s.settingsMgr.GetMaxPodLogsToRender() + if err != nil { + return fmt.Errorf("error getting MaxPodLogsToRender config: %w", err) + } + + if int64(len(pods)) > maxPodLogsToRender { + return status.Error(codes.InvalidArgument, "max pods to view logs are reached. Please provide more granular query") } var streams []chan logEntry diff --git a/server/application/application_test.go b/server/application/application_test.go index 57b740a6f1ea4a..3363d877210bfe 100644 --- a/server/application/application_test.go +++ b/server/application/application_test.go @@ -1965,6 +1965,60 @@ func TestLogsGetSelectedPod(t *testing.T) { }) } +func TestMaxPodLogsRender(t *testing.T) { + f := func(enf *rbac.Enforcer) { + _ = enf.SetBuiltinPolicy(assets.BuiltinPolicyCSV) + enf.SetDefaultRole("role:none") + } + + maxPodLogsToRender, _ := newTestAppServerWithEnforcerConfigure(f, t).settingsMgr.GetMaxPodLogsToRender() + podNumber := int(maxPodLogsToRender + 1) + resources := make([]appsv1.ResourceStatus, podNumber) + runtimeObjects := make([]runtime.Object, podNumber+1) + + for i := 0; i < podNumber; i++ { + pod := v1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("pod-%d", i), + Namespace: "test", + }, + } + resources[i] = appsv1.ResourceStatus{ + Group: pod.GroupVersionKind().Group, + Kind: pod.GroupVersionKind().Kind, + Version: pod.GroupVersionKind().Version, + Name: pod.Name, + Namespace: pod.Namespace, + Status: "Synced", + } + }
runtimeObjects[i] = kube.MustToUnstructured(&pod) + } + + testApp := newTestApp(func(app *appsv1.Application) { + app.Name = "test" + app.Status.Resources = resources + }) + runtimeObjects[podNumber] = testApp + + appServer := newTestAppServerWithEnforcerConfigure(f, t, runtimeObjects...) + + noRoleCtx := context.Background() + // nolint:staticcheck + adminCtx := context.WithValue(noRoleCtx, "claims", &jwt.MapClaims{"groups": []string{"admin"}}) + + t.Run("PodLogs", func(t *testing.T) { + err := appServer.PodLogs(&application.ApplicationPodLogsQuery{Name: pointer.String("test")}, &TestPodLogsServer{ctx: adminCtx}) + assert.NotNil(t, err) + statusCode, _ := status.FromError(err) + assert.Equal(t, codes.InvalidArgument, statusCode.Code()) + assert.Equal(t, "rpc error: code = InvalidArgument desc = max pods to view logs are reached. Please provide more granular query", err.Error()) + }) +} + // refreshAnnotationRemover runs an infinite loop until it detects and removes refresh annotation or given context is done func refreshAnnotationRemover(t *testing.T, ctx context.Context, patched *int32, appServer *Server, appName string, ch chan string) { for ctx.Err() == nil { diff --git a/util/settings/settings.go b/util/settings/settings.go index 9cd38417e081ef..3532ad091968ce 100644 --- a/util/settings/settings.go +++ b/util/settings/settings.go @@ -101,6 +101,8 @@ type ArgoCDSettings struct { InClusterEnabled bool `json:"inClusterEnabled"` // ServerRBACLogEnforceEnable temporary var indicates whether rbac will be enforced on logs ServerRBACLogEnforceEnable bool `json:"serverRBACLogEnforceEnable"` + // MaxPodLogsToRender the maximum number of pod logs to render + MaxPodLogsToRender int64 `json:"maxPodLogsToRender"` // ExecEnabled indicates whether the UI exec feature is enabled ExecEnabled bool `json:"execEnabled"` // ExecShells restricts which shells are allowed for `exec` and in which order they are tried @@ -475,6 +477,8 @@ const ( inClusterEnabledKey = "cluster.inClusterEnabled"
// settingsServerRBACLogEnforceEnable is the key to configure whether logs RBAC enforcement is enabled settingsServerRBACLogEnforceEnableKey = "server.rbac.log.enforce.enable" + // MaxPodLogsToRender the maximum number of pod logs to render + settingsMaxPodLogsToRender = "server.maxPodLogsToRender" // helmValuesFileSchemesKey is the key to configure the list of supported helm values file schemas helmValuesFileSchemesKey = "helm.valuesFileSchemes" // execEnabledKey is the key to configure whether the UI exec feature is enabled @@ -756,6 +760,19 @@ func (mgr *SettingsManager) GetServerRBACLogEnforceEnable() (bool, error) { return strconv.ParseBool(argoCDCM.Data[settingsServerRBACLogEnforceEnableKey]) } +func (mgr *SettingsManager) GetMaxPodLogsToRender() (int64, error) { + argoCDCM, err := mgr.getConfigMap() + if err != nil { + return 10, err + } + + if argoCDCM.Data[settingsMaxPodLogsToRender] == "" { + return 10, nil + } + + return strconv.ParseInt(argoCDCM.Data[settingsMaxPodLogsToRender], 10, 64) +} + func (mgr *SettingsManager) GetDeepLinks(deeplinkType string) ([]DeepLink, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { @@ -1412,6 +1429,13 @@ func updateSettingsFromConfigMap(settings *ArgoCDSettings, argoCDCM *apiv1.Confi if settings.PasswordPattern == "" { settings.PasswordPattern = common.PasswordPatten } + if maxPodLogsToRenderStr, ok := argoCDCM.Data[settingsMaxPodLogsToRender]; ok { + if val, err := strconv.ParseInt(maxPodLogsToRenderStr, 10, 64); err != nil { + log.Warnf("Failed to parse '%s' key: %v", settingsMaxPodLogsToRender, err) + } else { + settings.MaxPodLogsToRender = val + } + } settings.InClusterEnabled = argoCDCM.Data[inClusterEnabledKey] != "false" settings.ExecEnabled = argoCDCM.Data[execEnabledKey] == "true" execShells := argoCDCM.Data[execShellsKey]