From 03d822e077e2746a06968f208aeb8bb03fa85d54 Mon Sep 17 00:00:00 2001 From: frezes Date: Wed, 10 Jan 2024 20:44:50 +0800 Subject: [PATCH] get config form ServiceCR and remove MonitoringOptions in whizard-config Signed-off-by: frezes --- Makefile | 4 +- cmd/controller-manager/app/controllers.go | 50 +- cmd/controller-manager/app/options/options.go | 22 +- cmd/controller-manager/app/server.go | 10 - .../bases/monitoring.whizard.io_gateways.yaml | 2 +- .../monitoring.whizard.io_ingesters.yaml | 56 + .../bases/monitoring.whizard.io_rulers.yaml | 65 +- .../bases/monitoring.whizard.io_services.yaml | 133 ++- config/default/manager_auth_proxy_patch.yaml | 17 +- config/rbac/role.yaml | 27 - config/samples/sample.yaml | 66 +- go.mod | 2 +- go.sum | 4 +- pkg/api/monitoring/v1alpha1/service_types.go | 18 +- pkg/api/monitoring/v1alpha1/types.go | 10 + .../v1alpha1/zz_generated.deepcopy.go | 33 + pkg/constants/constans.go | 73 +- pkg/controllers/config/config.go | 3 - pkg/controllers/config/config_test.go | 282 +---- .../monitoring/compactor_controller.go | 25 +- .../monitoring/gateway_controller.go | 21 +- .../monitoring/ingestor_controller.go | 57 +- pkg/controllers/monitoring/options/common.go | 242 ----- .../monitoring/options/common_test.go | 185 ---- .../monitoring/options/component.go | 962 ------------------ pkg/controllers/monitoring/options/options.go | 100 -- .../monitoring/query_controller.go | 29 +- .../monitoring/query_frontend_controller.go | 25 +- .../resources/compactor/compactor.go | 5 +- .../resources/compactor/statefulset.go | 7 +- .../resources/gateway/deployment.go | 4 +- .../monitoring/resources/ingester/ingester.go | 5 +- .../resources/ingester/statefulset.go | 7 +- .../monitoring/resources/query/deployment.go | 4 +- .../monitoring/resources/query/query.go | 7 +- .../resources/queryfrontend/deployment.go | 2 +- .../monitoring/resources/router/configmap.go | 4 +- .../monitoring/resources/router/router.go | 7 +- .../monitoring/resources/ruler/ruler.go | 9 
+- .../monitoring/resources/ruler/statefulset.go | 16 +- .../monitoring/resources/service.go | 146 +++ .../monitoring/resources/store/store.go | 5 +- .../monitoring/resources/tenant/compactor.go | 2 +- .../monitoring/resources/tenant/ingestor.go | 10 +- .../monitoring/resources/tenant/ruler.go | 7 +- .../monitoring/resources/tenant/tenant.go | 6 +- .../monitoring/router_controller.go | 26 +- .../monitoring/ruler_controller.go | 35 +- .../monitoring/storage_controller.go | 13 - .../monitoring/store_controller.go | 72 +- .../monitoring/tenant_controller.go | 14 +- 51 files changed, 808 insertions(+), 2128 deletions(-) delete mode 100644 pkg/controllers/monitoring/options/common.go delete mode 100644 pkg/controllers/monitoring/options/common_test.go delete mode 100644 pkg/controllers/monitoring/options/component.go delete mode 100644 pkg/controllers/monitoring/options/options.go create mode 100644 pkg/controllers/monitoring/resources/service.go diff --git a/Makefile b/Makefile index 000028cc..7c3e58df 100644 --- a/Makefile +++ b/Makefile @@ -125,14 +125,14 @@ docker-build-monitoring-block-manager: ##@ Deployment install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. - $(KUSTOMIZE) build config/crd | kubectl apply -f - + $(KUSTOMIZE) build config/crd | kubectl apply --server-side -f - uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/crd | kubectl delete -f - deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. cd config/manager && $(KUSTOMIZE) edit set image controller=${CONTROLLER_MANAGER_IMG} - $(KUSTOMIZE) build config/default | kubectl apply -f - + $(KUSTOMIZE) build config/default | kubectl apply --server-side -f - undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. 
$(KUSTOMIZE) build config/default | kubectl delete -f - diff --git a/cmd/controller-manager/app/controllers.go b/cmd/controller-manager/app/controllers.go index 6e79e2c0..d3791cea 100644 --- a/cmd/controller-manager/app/controllers.go +++ b/cmd/controller-manager/app/controllers.go @@ -3,22 +3,18 @@ package app import ( "context" - "github.com/kubesphere/whizard/cmd/controller-manager/app/options" "github.com/kubesphere/whizard/pkg/client/k8s" "github.com/kubesphere/whizard/pkg/controllers/monitoring" - "github.com/kubesphere/whizard/pkg/informers" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/manager" ) -func addControllers(mgr manager.Manager, client k8s.Client, informerFactory informers.InformerFactory, - cmOptions *options.ControllerManagerOptions, ctx context.Context) error { +func addControllers(mgr manager.Manager, client k8s.Client, ctx context.Context) error { if err := (&monitoring.GatewayReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Context: ctx, - Options: cmOptions.MonitoringOptions.Gateway, }).SetupWithManager(mgr); err != nil { klog.Errorf("Unable to create Gateway controller: %v", err) return err @@ -28,7 +24,6 @@ func addControllers(mgr manager.Manager, client k8s.Client, informerFactory info Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Context: ctx, - Options: cmOptions.MonitoringOptions.QueryFrontend, }).SetupWithManager(mgr); err != nil { klog.Errorf("Unable to create Query Frontend controller: %v", err) return err @@ -38,7 +33,6 @@ func addControllers(mgr manager.Manager, client k8s.Client, informerFactory info Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Context: ctx, - Options: cmOptions.MonitoringOptions, }).SetupWithManager(mgr); err != nil { klog.Errorf("Unable to create Query controller: %v", err) return err @@ -48,62 +42,51 @@ func addControllers(mgr manager.Manager, client k8s.Client, informerFactory info Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Context: ctx, - Options: 
cmOptions.MonitoringOptions, }).SetupWithManager(mgr); err != nil { klog.Errorf("Unable to create Router controller: %v", err) return err } if err := (&monitoring.StoreReconciler{ - DefaulterValidator: monitoring.CreateStoreDefaulterValidator(cmOptions.MonitoringOptions.Store), - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Context: ctx, - Options: cmOptions.MonitoringOptions.Store, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Context: ctx, }).SetupWithManager(mgr); err != nil { klog.Errorf("Unable to create Store controller: %v", err) return err } if err := (&monitoring.CompactorReconciler{ - DefaulterValidator: monitoring.CreateCompactorDefaulterValidator(cmOptions.MonitoringOptions.Compactor), - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Context: ctx, - Options: cmOptions.MonitoringOptions.Compactor, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Context: ctx, }).SetupWithManager(mgr); err != nil { klog.Errorf("Unable to create Compactor controller: %v", err) return err } if err := (&monitoring.IngesterReconciler{ - DefaulterValidator: monitoring.CreateIngesterDefaulterValidator(cmOptions.MonitoringOptions.Ingester), - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Context: ctx, - Options: cmOptions.MonitoringOptions.Ingester, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Context: ctx, }).SetupWithManager(mgr); err != nil { klog.Errorf("Unable to create Ingester controller: %v", err) return err } if err := (&monitoring.RulerReconciler{ - DefaulterValidator: monitoring.CreateRulerDefaulterValidator(cmOptions.MonitoringOptions.Ruler), - Option: cmOptions.MonitoringOptions.Ruler, - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Context: ctx, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Context: ctx, }).SetupWithManager(mgr); err != nil { klog.Errorf("Unable to create Ruler controller: %v", err) return err } if err := (&monitoring.TenantReconciler{ - DefaulterValidator: 
monitoring.CreateTenantDefaulterValidator(*cmOptions.MonitoringOptions), - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Context: ctx, - Options: cmOptions.MonitoringOptions, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Context: ctx, }).SetupWithManager(mgr); err != nil { klog.Errorf("Unable to create Tenant controller: %v", err) return err @@ -113,7 +96,6 @@ func addControllers(mgr manager.Manager, client k8s.Client, informerFactory info Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Context: ctx, - Options: cmOptions.MonitoringOptions.Storage, }).SetupWithManager(mgr); err != nil { klog.Errorf("Unable to create Storage controller: %v", err) return err diff --git a/cmd/controller-manager/app/options/options.go b/cmd/controller-manager/app/options/options.go index a53338b6..ffd66ba5 100644 --- a/cmd/controller-manager/app/options/options.go +++ b/cmd/controller-manager/app/options/options.go @@ -12,16 +12,13 @@ import ( "github.com/kubesphere/whizard/pkg/client/k8s" "github.com/kubesphere/whizard/pkg/controllers/config" - monitoring "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" ) type ControllerManagerOptions struct { KubernetesOptions *k8s.KubernetesOptions - MonitoringOptions *monitoring.Options LeaderElect bool LeaderElection *leaderelection.LeaderElectionConfig - WebhookCertDir string MetricsBindAddress string HealthProbeBindAddress string @@ -30,15 +27,13 @@ type ControllerManagerOptions struct { func NewControllerManagerOptions() *ControllerManagerOptions { return &ControllerManagerOptions{ KubernetesOptions: k8s.NewKubernetesOptions(), - MonitoringOptions: monitoring.NewOptions(), LeaderElection: &leaderelection.LeaderElectionConfig{ LeaseDuration: 30 * time.Second, RenewDeadline: 15 * time.Second, RetryPeriod: 5 * time.Second, }, - LeaderElect: false, - WebhookCertDir: "", + LeaderElect: false, MetricsBindAddress: ":8080", HealthProbeBindAddress: ":8081", @@ -48,19 +43,13 @@ func NewControllerManagerOptions() 
*ControllerManagerOptions { func (s *ControllerManagerOptions) Flags() cliflag.NamedFlagSets { fss := cliflag.NamedFlagSets{} s.KubernetesOptions.AddFlags(fss.FlagSet("kubernetes"), s.KubernetesOptions) - s.MonitoringOptions.AddFlags(fss.FlagSet("monitoring"), s.MonitoringOptions) fs := fss.FlagSet("leaderelection") s.bindLeaderElectionFlags(s.LeaderElection, fs) fs.BoolVar(&s.LeaderElect, "leader-elect", s.LeaderElect, ""+ - "Whether to enable leader election. This field should be enabled when controller manager"+ - "deployed with multiple replicas.") - - fs.StringVar(&s.WebhookCertDir, "webhook-cert-dir", s.WebhookCertDir, ""+ - "Certificate directory used to setup webhooks, need tls.crt and tls.key placed inside."+ - "if not set, webhook server would look up the server key and certificate in"+ - "{TempDir}/k8s-webhook-server/serving-certs") + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") kfs := fss.FlagSet("klog") local := flag.NewFlagSet("klog", flag.ExitOnError) @@ -80,7 +69,7 @@ func (s *ControllerManagerOptions) Flags() cliflag.NamedFlagSets { func (s *ControllerManagerOptions) Validate() []error { var errs []error errs = append(errs, s.KubernetesOptions.Validate()...) - errs = append(errs, s.MonitoringOptions.Validate()...) 
+ return errs } @@ -107,7 +96,4 @@ func (s *ControllerManagerOptions) MergeConfig(cfg *config.Config) { cfg.KubernetesOptions.ApplyTo(s.KubernetesOptions) } - if cfg.MonitoringOptions != nil { - cfg.MonitoringOptions.ApplyTo(s.MonitoringOptions) - } } diff --git a/cmd/controller-manager/app/server.go b/cmd/controller-manager/app/server.go index b4345fd9..0b8b458e 100644 --- a/cmd/controller-manager/app/server.go +++ b/cmd/controller-manager/app/server.go @@ -18,7 +18,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" - "sigs.k8s.io/controller-runtime/pkg/webhook" "github.com/kubesphere/whizard/cmd/controller-manager/app/options" "github.com/kubesphere/whizard/pkg/apis" @@ -39,9 +38,6 @@ func NewControllerManagerCommand() *cobra.Command { conf.KubernetesOptions.ApplyTo(s.KubernetesOptions) } - if conf.MonitoringOptions != nil { - conf.MonitoringOptions.ApplyTo(s.MonitoringOptions) - } } else { klog.Fatal("Failed to load configuration from disk", err) } @@ -142,10 +138,6 @@ func run(s *options.ControllerManagerOptions, ctx context.Context) error { Metrics: metricsserver.Options{ BindAddress: s.MetricsBindAddress, }, - WebhookServer: webhook.NewServer(webhook.Options{ - CertDir: s.WebhookCertDir, - Port: 8443, - }), } if s.LeaderElect { @@ -173,8 +165,6 @@ func run(s *options.ControllerManagerOptions, ctx context.Context) error { if err = addControllers(mgr, kubernetesClient, - informerFactory, - s, ctx); err != nil { return fmt.Errorf("unable to register controllers to the manager: %v", err) } diff --git a/config/crd/bases/monitoring.whizard.io_gateways.yaml b/config/crd/bases/monitoring.whizard.io_gateways.yaml index c30fa2cf..1a0d311b 100644 --- a/config/crd/bases/monitoring.whizard.io_gateways.yaml +++ b/config/crd/bases/monitoring.whizard.io_gateways.yaml @@ -16,7 +16,7 @@ spec: versions: - additionalPrinterColumns: - description: The 
number of desired replicas - jsonPath: .spec.Node + jsonPath: .spec.replicas name: Replicas type: integer - description: The nodePort of Gateway service diff --git a/config/crd/bases/monitoring.whizard.io_ingesters.yaml b/config/crd/bases/monitoring.whizard.io_ingesters.yaml index 944ed071..256d8f3b 100644 --- a/config/crd/bases/monitoring.whizard.io_ingesters.yaml +++ b/config/crd/bases/monitoring.whizard.io_ingesters.yaml @@ -1344,6 +1344,62 @@ spec: type: object x-kubernetes-map-type: atomic type: array + ingesterTsdbCleanup: + properties: + image: + description: Image is the envoy image with tag/version + type: string + resources: + description: Define resources requests and limits for sidecar + container. + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object localTsdbRetention: description: LocalTsdbRetention configs how long to retain raw samples on local storage. diff --git a/config/crd/bases/monitoring.whizard.io_rulers.yaml b/config/crd/bases/monitoring.whizard.io_rulers.yaml index 5d8755ca..64dcc912 100644 --- a/config/crd/bases/monitoring.whizard.io_rulers.yaml +++ b/config/crd/bases/monitoring.whizard.io_rulers.yaml @@ -1403,7 +1403,9 @@ spec: type: object type: object evaluationInterval: - description: Interval between consecutive evaluations. + default: 1m + description: "Interval between consecutive evaluations. \n Default: + \"1m\"" pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ type: string flags: @@ -1746,6 +1748,62 @@ spec: type: object type: object type: object + rulerWriteProxy: + properties: + image: + description: Image is the envoy image with tag/version + type: string + resources: + description: Define resources requests and limits for sidecar + container. + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. 
\n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object secrets: description: Secrets is a list of Secrets in the same namespace as the component object, which shall be mounted into the Prometheus @@ -1923,9 +1981,10 @@ spec: type: object type: object shards: - description: Number of shards to take the hash of fully qualified + default: 1 + description: 'Number of shards to take the hash of fully qualified name of the rule group in order to split rules. 
Each shard of rules - will be bound to one separate statefulset. + will be bound to one separate statefulset. Default: 1' format: int32 type: integer tenant: diff --git a/config/crd/bases/monitoring.whizard.io_services.yaml b/config/crd/bases/monitoring.whizard.io_services.yaml index 2e8bb375..5ffc2152 100644 --- a/config/crd/bases/monitoring.whizard.io_services.yaml +++ b/config/crd/bases/monitoring.whizard.io_services.yaml @@ -4494,6 +4494,12 @@ spec: description: "DefaultTenantsPerIngester Whizard default tenant count per ingester. \n Default: 3" type: integer + disableTsdbCleanup: + default: true + description: "DisableTSDBCleanup Disable the TSDB cleanup of ingester. + The cleanup will delete the blocks that belong to deleted tenants + in the data directory of ingester TSDB. \n Default: true" + type: boolean flags: description: Flags is the flags of component. items: @@ -4521,6 +4527,62 @@ spec: type: object x-kubernetes-map-type: atomic type: array + ingesterTsdbCleanup: + properties: + image: + description: Image is the envoy image with tag/version + type: string + resources: + description: Define resources requests and limits for sidecar + container. + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. \n This field + is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object localTsdbRetention: description: LocalTsdbRetention configs how long to retain raw samples on local storage. @@ -9157,7 +9219,6 @@ spec: type: array type: object rulerTemplateSpec: - description: RulerSpec defines the desired state of a Ruler properties: affinity: description: If specified, the pod's scheduling constraints. @@ -10529,6 +10590,11 @@ spec: type: string type: object type: object + disableAlertingRulesAutoSelection: + default: false + description: "DisableAlertingRulesAutoSelection disable auto select + alerting rules in tenant ruler \n Default: false" + type: boolean envoy: properties: image: @@ -10586,7 +10652,9 @@ spec: type: object type: object evaluationInterval: - description: Interval between consecutive evaluations. 
+ default: 1m + description: "Interval between consecutive evaluations. \n Default: + \"1m\"" pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ type: string flags: @@ -10932,6 +11000,62 @@ spec: type: object type: object type: object + rulerWriteProxy: + properties: + image: + description: Image is the envoy image with tag/version + type: string + resources: + description: Define resources requests and limits for sidecar + container. + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. \n This field + is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. 
If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object secrets: description: Secrets is a list of Secrets in the same namespace as the component object, which shall be mounted into the Prometheus @@ -11116,9 +11240,10 @@ spec: type: object type: object shards: - description: Number of shards to take the hash of fully qualified + default: 1 + description: 'Number of shards to take the hash of fully qualified name of the rule group in order to split rules. Each shard of - rules will be bound to one separate statefulset. + rules will be bound to one separate statefulset. Default: 1' format: int32 type: integer tenant: diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index a224be19..70c3437f 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -10,15 +10,28 @@ spec: spec: containers: - name: kube-rbac-proxy - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0 args: - "--secure-listen-address=0.0.0.0:8443" - "--upstream=http://127.0.0.1:8080/" - "--logtostderr=true" - - "--v=10" + - "--v=0" ports: - containerPort: 8443 + protocol: TCP name: https + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi - name: manager args: - "--health-probe-bind-address=:8081" diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index ad147554..6ccb1b9f 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -16,19 +16,6 @@ rules: - patch - update - watch -- apiGroups: - - apps - resources: - - deployments - - statefulsets - verbs: - - 
create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - apps resources: @@ -67,20 +54,6 @@ rules: - patch - update - watch -- apiGroups: - - "" - resources: - - configmaps - - serviceaccounts - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - "" resources: diff --git a/config/samples/sample.yaml b/config/samples/sample.yaml index cb2fc8ee..49c873c0 100644 --- a/config/samples/sample.yaml +++ b/config/samples/sample.yaml @@ -3,66 +3,97 @@ apiVersion: monitoring.whizard.io/v1alpha1 kind: Service metadata: - name: central + name: whizard namespace: kubesphere-monitoring-system spec: tenantHeader: "WHIZARD-TENANT" defaultTenantId: "default-tenant" tenantLabelName: "tenant_id" + compactorTemplateSpec: + securityContext: + fsGroup: 0 + dataVolume: + emptyDir: {} + image: thanosio/thanos:v0.33.0 + gatewayTemplateSpec: {} + ingesterTemplateSpec: + securityContext: + fsGroup: 0 + dataVolume: + emptyDir: {} + queryTemplateSpec: + image: thanosio/thanos:v0.33.0 + queryFrontendTemplateSpec: + image: thanosio/thanos:v0.33.0 + routerTemplateSpec: + image: thanosio/thanos:v0.33.0 + rulerTemplateSpec: + securityContext: + fsGroup: 0 + image: thanosio/thanos:v0.33.0 + storeTemplateSpec: + securityContext: + fsGroup: 0 + dataVolume: + emptyDir: {} + image: thanosio/thanos:v0.33.0 --- apiVersion: monitoring.whizard.io/v1alpha1 kind: Gateway metadata: - name: central + name: whizard namespace: kubesphere-monitoring-system labels: - monitoring.whizard.io/service: kubesphere-monitoring-system.central + monitoring.whizard.io/service: kubesphere-monitoring-system.whizard spec: {} --- apiVersion: monitoring.whizard.io/v1alpha1 kind: Query metadata: - name: central + name: whizard namespace: kubesphere-monitoring-system labels: - monitoring.whizard.io/service: kubesphere-monitoring-system.central + monitoring.whizard.io/service: kubesphere-monitoring-system.whizard spec: replicaLabelNames: - prometheus_replica 
- receive_replica - ruler_replica + image: thanosio/thanos:v0.32.5 --- apiVersion: monitoring.whizard.io/v1alpha1 kind: QueryFrontend metadata: - name: central + name: whizard namespace: kubesphere-monitoring-system labels: - monitoring.whizard.io/service: kubesphere-monitoring-system.central + monitoring.whizard.io/service: kubesphere-monitoring-system.whizard spec: {} --- apiVersion: monitoring.whizard.io/v1alpha1 kind: Router metadata: - name: central + name: whizard namespace: kubesphere-monitoring-system labels: - monitoring.whizard.io/service: kubesphere-monitoring-system.central + monitoring.whizard.io/service: kubesphere-monitoring-system.whizard spec: replicationFactor: 2 + + --- ### global alert ruler apiVersion: monitoring.whizard.io/v1alpha1 kind: Ruler metadata: labels: - monitoring.whizard.io/service: kubesphere-monitoring-system.central - name: central + monitoring.whizard.io/service: kubesphere-monitoring-system.whizard + name: whizard namespace: kubesphere-monitoring-system spec: alertmanagersUrl: @@ -78,4 +109,15 @@ spec: operator: In values: - global - shards: 1 \ No newline at end of file + shards: 1 + +--- +apiVersion: monitoring.whizard.io/v1alpha1 +kind: Tenant +metadata: + labels: + monitoring.whizard.io/service: kubesphere-monitoring-system.whizard + monitoring.whizard.io/storage: default + name: test +spec: + tenant: test \ No newline at end of file diff --git a/go.mod b/go.mod index de4a60e2..141c9993 100644 --- a/go.mod +++ b/go.mod @@ -168,7 +168,7 @@ require ( go.uber.org/zap v1.25.0 // indirect go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect - golang.org/x/crypto v0.16.0 // indirect + golang.org/x/crypto v0.17.0 // indirect golang.org/x/exp v0.0.0-20231127185646-65229373498e // indirect golang.org/x/mod v0.14.0 // indirect golang.org/x/net v0.19.0 // indirect diff --git a/go.sum b/go.sum index 29e15a55..5b310c45 100644 --- a/go.sum +++ 
b/go.sum @@ -900,8 +900,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= diff --git a/pkg/api/monitoring/v1alpha1/service_types.go b/pkg/api/monitoring/v1alpha1/service_types.go index d9a9b661..cd06c748 100644 --- a/pkg/api/monitoring/v1alpha1/service_types.go +++ b/pkg/api/monitoring/v1alpha1/service_types.go @@ -52,7 +52,7 @@ type ServiceSpec struct { GatewayTemplateSpec GatewaySpec `json:"gatewayTemplateSpec"` QueryFrontendTemplateSpec QueryFrontendSpec `json:"queryFrontendTemplateSpec"` QueryTemplateSpec QuerySpec `json:"queryTemplateSpec"` - RulerTemplateSpec RulerSpec `json:"rulerTemplateSpec"` + RulerTemplateSpec RulerTemplateSpec `json:"rulerTemplateSpec"` RouterTemplateSpec RouterSpec `json:"routerTemplateSpec"` IngesterTemplateSpec IngesterTemplateSpec `json:"ingesterTemplateSpec"` StoreTemplateSpec StoreSpec `json:"storeTemplateSpec"` @@ -72,6 +72,22 @@ type IngesterTemplateSpec struct { // Default: "3h" // +kubebuilder:default:="3h" DefaultIngesterRetentionPeriod 
Duration `json:"defaultIngesterRetentionPeriod,omitempty"` + // DisableTSDBCleanup Disable the TSDB cleanup of ingester. + // The cleanup will delete the blocks that belong to deleted tenants in the data directory of ingester TSDB. + // + // Default: true + // +kubebuilder:default:=true + DisableTSDBCleanup *bool `json:"disableTsdbCleanup,omitempty"` +} + +type RulerTemplateSpec struct { + RulerSpec `json:",inline"` + + // DisableAlertingRulesAutoSelection disable auto select alerting rules in tenant ruler + // + // Default: false + // +kubebuilder:default:=false + DisableAlertingRulesAutoSelection *bool `json:"disableAlertingRulesAutoSelection,omitempty"` } type CompactorTemplateSpec struct { diff --git a/pkg/api/monitoring/v1alpha1/types.go b/pkg/api/monitoring/v1alpha1/types.go index a099adf3..ee5613a6 100644 --- a/pkg/api/monitoring/v1alpha1/types.go +++ b/pkg/api/monitoring/v1alpha1/types.go @@ -99,8 +99,10 @@ type CommonSpec struct { // Log filtering level. Possible options: error, warn, info, debug. LogLevel string `json:"logLevel,omitempty"` + // Log format to use. Possible options: logfmt or json. LogFormat string `json:"logFormat,omitempty"` + // Flags is the flags of component. Flags []string `json:"flags,omitempty"` } @@ -570,6 +572,8 @@ type CompactorList struct { type IngesterSpec struct { CommonSpec `json:",inline"` + IngesterTSDBCleanUp SidecarSpec `json:"ingesterTsdbCleanup,omitempty"` + // Tenants if not empty indicates current config is for hard tenants; otherwise, it is for soft tenants. Tenants []string `json:"tenants,omitempty"` @@ -632,6 +636,7 @@ type RulerSpec struct { CommonSpec `json:",inline"` RulerQueryProxy SidecarSpec `json:"rulerQueryProxy,omitempty"` + RulerWriteProxy SidecarSpec `json:"rulerWriteProxy,omitempty"` Envoy SidecarSpec `json:"envoy,omitempty"` @@ -646,6 +651,8 @@ type RulerSpec struct { // Number of shards to take the hash of fully qualified name of the rule group in order to split rules. 
// Each shard of rules will be bound to one separate statefulset. + // Default: 1 + // +kubebuilder:default:=1 Shards *int32 `json:"shards,omitempty"` // Tenant if not empty indicates which tenant's data is evaluated for the selected rules; @@ -665,6 +672,9 @@ type RulerSpec struct { // Define configuration for connecting to alertmanager. Maps to the `alertmanagers.config` arg. AlertmanagersConfig *corev1.SecretKeySelector `json:"alertmanagersConfig,omitempty"` // Interval between consecutive evaluations. + // + // Default: "1m" + // +kubebuilder:default:="1m" EvaluationInterval Duration `json:"evaluationInterval,omitempty"` // DataVolume specifies how volume shall be used diff --git a/pkg/api/monitoring/v1alpha1/zz_generated.deepcopy.go b/pkg/api/monitoring/v1alpha1/zz_generated.deepcopy.go index 3d7ce822..0fa46d8c 100644 --- a/pkg/api/monitoring/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/api/monitoring/v1alpha1/zz_generated.deepcopy.go @@ -285,6 +285,11 @@ func (in *CompactorSpec) DeepCopyInto(out *CompactorSpec) { *out = new(bool) **out = **in } + if in.Retention != nil { + in, out := &in.Retention, &out.Retention + *out = new(Retention) + **out = **in + } if in.DataVolume != nil { in, out := &in.DataVolume, &out.DataVolume *out = new(KubernetesVolume) @@ -624,6 +629,7 @@ func (in *IngesterList) DeepCopyObject() runtime.Object { func (in *IngesterSpec) DeepCopyInto(out *IngesterSpec) { *out = *in in.CommonSpec.DeepCopyInto(&out.CommonSpec) + in.IngesterTSDBCleanUp.DeepCopyInto(&out.IngesterTSDBCleanUp) if in.Tenants != nil { in, out := &in.Tenants, &out.Tenants *out = make([]string, len(*in)) @@ -670,6 +676,11 @@ func (in *IngesterStatus) DeepCopy() *IngesterStatus { func (in *IngesterTemplateSpec) DeepCopyInto(out *IngesterTemplateSpec) { *out = *in in.IngesterSpec.DeepCopyInto(&out.IngesterSpec) + if in.DisableTSDBCleanup != nil { + in, out := &in.DisableTSDBCleanup, &out.DisableTSDBCleanup + *out = new(bool) + **out = **in + } } // DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new IngesterTemplateSpec. @@ -1222,6 +1233,7 @@ func (in *RulerSpec) DeepCopyInto(out *RulerSpec) { *out = *in in.CommonSpec.DeepCopyInto(&out.CommonSpec) in.RulerQueryProxy.DeepCopyInto(&out.RulerQueryProxy) + in.RulerWriteProxy.DeepCopyInto(&out.RulerWriteProxy) in.Envoy.DeepCopyInto(&out.Envoy) in.PrometheusConfigReloader.DeepCopyInto(&out.PrometheusConfigReloader) if in.RuleSelectors != nil { @@ -1299,6 +1311,27 @@ func (in *RulerStatus) DeepCopy() *RulerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RulerTemplateSpec) DeepCopyInto(out *RulerTemplateSpec) { + *out = *in + in.RulerSpec.DeepCopyInto(&out.RulerSpec) + if in.DisableAlertingRulesAutoSelection != nil { + in, out := &in.DisableAlertingRulesAutoSelection, &out.DisableAlertingRulesAutoSelection + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulerTemplateSpec. +func (in *RulerTemplateSpec) DeepCopy() *RulerTemplateSpec { + if in == nil { + return nil + } + out := new(RulerTemplateSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *S3) DeepCopyInto(out *S3) { *out = *in diff --git a/pkg/constants/constans.go b/pkg/constants/constans.go index 6b1be7fb..1eea495b 100644 --- a/pkg/constants/constans.go +++ b/pkg/constants/constans.go @@ -1,5 +1,11 @@ package constants +import ( + "github.com/prometheus/common/version" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + const ( DefaultTenantHeader = "WHIZARD-TENANT" DefaultTenantId = "default-tenant" @@ -54,18 +60,6 @@ const ( LabelNameRulerShardSn = "monitoring.whizard.io/ruler-shard-sn" - ConfigPath = "/etc/whizard/" - StorageDir = "/whizard" - TSDBVolumeName = "tsdb" - - WhizardConfigMountPath = "/etc/whizard/config/" - WhizardWebConfigMountPath = "/etc/whizard/web_config/" - WhizardCertsMountPath = "/etc/whizard/certs/" - EnvoyConfigMountPath = "/etc/envoy/config/" - EnvoyCertsMountPath = "/etc/envoy/certs/" - EnvoyConfigMapMountPath = "/etc/envoy/configmap/" - EnvoySecretMountPath = "/etc/envoy/secret/" - LabelNameStorageHash = "monitoring.whizard.io/storage-hash" LabelNameTenantHash = "monitoring.whizard.io/tenant-hash" LabelNameConfigHash = "monitoring.whizard.io/config-hash" @@ -77,6 +71,24 @@ const ( IngesterStateRunning = "running" ) +// Mount path of config files in containers. 
+const ( + ConfigPath = "/etc/whizard/" + StorageDir = "/whizard" + TSDBVolumeName = "tsdb" + + WhizardConfigMountPath = "/etc/whizard/config/" + WhizardWebConfigMountPath = "/etc/whizard/web_config/" + WhizardCertsMountPath = "/etc/whizard/certs/" + WhizardConfigMapsMountPath = "/etc/whizard/configmaps/" + WhizardSecretsMountPath = "/etc/whizard/secrets/" + + EnvoyConfigMountPath = "/etc/envoy/config/" + EnvoyCertsMountPath = "/etc/envoy/certs/" + EnvoyConfigMapMountPath = "/etc/envoy/configmap/" + EnvoySecretMountPath = "/etc/envoy/secret/" +) + const ( StorageProviderFILESYSTEM string = "FILESYSTEM" StorageProviderGCS string = "GCS" @@ -108,3 +120,40 @@ const ( CompactHTTPPort = "10912" QueryFrontendHTTPPort = "10913" ) + +const ( + // The version is the same as thanos mod version + DefaultWhizardBaseImage = "thanosio/thanos:v0.33.0" + // The version is the same as prometheus-operator mod version + DefaultPrometheusConfigReloaderImage = "quay.io/prometheus-operator/prometheus-config-reloader:v0.68.0" + + DefaultEnvoyImage = "envoyproxy/envoy:v1.20.2" + DefaultRulerWriteProxyImage = "kubesphere/cortex-tenant:v1.7.2" + DefaultIngesterTSDBCleanupImage = "bash:5.1.16" +) + +var DefaultWhizardMonitoringGatewayImage = "kubesphere/whizard-monitoring-gateway:" + version.Version +var DefaultWhizardBlockManagerImage = "kubesphere/whizard-monitoring-block-manager:" + version.Version + +var DefaultWhizardBaseResources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50m"), + corev1.ResourceMemory: resource.MustParse("50Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("4Gi"), + }, +} + +// DefaultWhizardLargeResource for ingester and store +var DefaultWhizardLargeResource = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50m"), + corev1.ResourceMemory: resource.MustParse("50Mi"), + 
}, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("16Gi"), + }, +} diff --git a/pkg/controllers/config/config.go b/pkg/controllers/config/config.go index f1868870..26c32137 100644 --- a/pkg/controllers/config/config.go +++ b/pkg/controllers/config/config.go @@ -15,7 +15,6 @@ import ( "k8s.io/klog/v2" "github.com/kubesphere/whizard/pkg/client/k8s" - monitoring "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" ) var ( @@ -96,13 +95,11 @@ func defaultConfig() *config { type Config struct { KubernetesOptions *k8s.KubernetesOptions `json:"kubernetes,omitempty" yaml:"kubernetes,omitempty" mapstructure:"kubernetes"` - MonitoringOptions *monitoring.Options `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring"` } func New() *Config { return &Config{ KubernetesOptions: k8s.NewKubernetesOptions(), - MonitoringOptions: monitoring.NewOptions(), } } diff --git a/pkg/controllers/config/config_test.go b/pkg/controllers/config/config_test.go index a02a6047..d8239a31 100644 --- a/pkg/controllers/config/config_test.go +++ b/pkg/controllers/config/config_test.go @@ -2,32 +2,17 @@ package config import ( "fmt" - "io/ioutil" "os" "testing" - "time" "github.com/google/go-cmp/cmp" - "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/client/k8s" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" - "k8s.io/api/autoscaling/v2beta2" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/yaml" ) func newTestConfig() (*Config, error) { - var replicas1 int32 = 1 - var replicas2 int32 = 2 - var replicas uint64 = 1 - var pflase bool = false - var ptrue bool = true - var stabilizationWindowSeconds int32 = 300 - var cpuAverageUtilization int32 = 80 - var memAverageUtilization int32 = 80 - var sizeLimit = resource.MustParse("500Mi") + var conf = 
&Config{ KubernetesOptions: &k8s.KubernetesOptions{ KubeConfig: "/Users/frezes/.kube/config", @@ -35,269 +20,6 @@ func newTestConfig() (*Config, error) { QPS: 1e6, Burst: 1e6, }, - MonitoringOptions: &options.Options{ - Compactor: &options.CompactorOptions{ - DefaultTenantsPerCompactor: 10, - CommonOptions: options.CommonOptions{ - Replicas: &replicas1, - Image: "kubesphere/thanos:v0.29.5", - LogLevel: "info", - LogFormat: "logfmt", - Flags: []string{"--block-files-concurrency=20", "--compact.blocks-fetch-concurrency=5", "--web.disable"}, - Resources: corev1.ResourceRequirements{}, - }, - DataVolume: &v1alpha1.KubernetesVolume{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaim{ - Spec: corev1.PersistentVolumeClaimSpec{ - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("10Gi"), - }, - }, - }, - }, - }, - }, - Gateway: &options.GatewayOptions{ - CommonOptions: options.CommonOptions{ - Image: "kubesphere/whizard-options-gateway:v0.5.0", - Replicas: &replicas2, - LogLevel: "info", - LogFormat: "logfmt", - }, - }, - Ingester: &options.IngesterOptions{ - LocalTsdbRetention: "7d", - DisableTSDBCleanup: &pflase, - DefaultTenantsPerIngester: 3, - DefaultIngesterRetentionPeriod: time.Hour * 3, - TSDBCleanupImage: "bash:5.1.16", - CommonOptions: options.CommonOptions{ - Image: "kubesphere/thanos:v0.29.5", - Replicas: &replicas2, - LogLevel: "info", - LogFormat: "logfmt", - Resources: corev1.ResourceRequirements{}, - SecurityContext: &corev1.PodSecurityContext{ - FSGroup: &[]int64{0}[0], - }, - }, - DataVolume: &v1alpha1.KubernetesVolume{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaim{ - Spec: corev1.PersistentVolumeClaimSpec{ - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("20Gi"), - }, - }, - }, - }, - }, - }, - Query: &options.QueryOptions{ - CommonOptions: options.CommonOptions{ - Image: "kubesphere/thanos:v0.28.5", - 
Replicas: &replicas2, - LogLevel: "info", - LogFormat: "logfmt", - Flags: []string{"--query.max-concurrent=200"}, - }, - Envoy: &options.SidecarOptions{ - Image: "envoyproxy/envoy:corev1.20.2", - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - }, - }, - }, - QueryFrontend: &options.QueryFrontendOptions{ - CommonOptions: options.CommonOptions{ - Image: "kubesphere/thanos:v0.29.5", - Replicas: &replicas2, - LogLevel: "info", - LogFormat: "logfmt", - }, - Envoy: &options.SidecarOptions{ - Image: "envoyproxy/envoy:corev1.20.2", - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - }, - }, - }, - Router: &options.RouterOptions{ - ReplicationFactor: &replicas, - CommonOptions: options.CommonOptions{ - Image: "kubesphere/thanos:v0.29.5", - Replicas: &replicas2, - LogLevel: "info", - LogFormat: "logfmt", - }, - Envoy: &options.SidecarOptions{ - Image: "envoyproxy/envoy:corev1.20.2", - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - }, - }, - }, - Ruler: &options.RulerOptions{ - Shards: &replicas1, - EvaluationInterval: "1m", - RuleSelectors: []*metav1.LabelSelector{ - &metav1.LabelSelector{ - MatchLabels: map[string]string{"role": "alert-rules"}, - }, - }, - 
AlertmanagersURL: []string{"dnssrv+http://alertmanager-operated.kubesphere-monitoring-system.svc:9093"}, - CommonOptions: options.CommonOptions{ - Image: "kubesphere/thanos:v0.29.5", - Replicas: &replicas1, - LogLevel: "info", - LogFormat: "logfmt", - }, - DataVolume: &v1alpha1.KubernetesVolume{ - EmptyDir: &corev1.EmptyDirVolumeSource{ - SizeLimit: &sizeLimit, - }, - }, - PrometheusConfigReloader: options.SidecarOptions{ - Image: "kubesphere/prometheus-config-reloader:v0.55.1", - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("50Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("50Mi"), - }, - }, - }, - RulerQueryProxy: options.SidecarOptions{ - Image: "kubesphere/whizard-monitoring-gateway:v0.5.0", - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("50Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("50Mi"), - }, - }, - }, - RulerWriteProxy: options.SidecarOptions{ - Image: "kubesphere/cortex-tenant:v1.7.2", - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("50Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("200m"), - corev1.ResourceMemory: resource.MustParse("400Mi"), - }, - }, - }, - Envoy: &options.SidecarOptions{ - Image: "envoyproxy/envoy:corev1.20.2", - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: 
resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - }, - }, - }, - Store: &options.StoreOptions{ - CommonOptions: options.CommonOptions{ - Replicas: &replicas1, - Image: "kubesphere/thanos:v0.29.5", - LogLevel: "info", - LogFormat: "logfmt", - Flags: []string{"--max-time=-36h", "--web.disable"}, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - }, - }, - Scaler: &v1alpha1.AutoScaler{ - MinReplicas: &replicas2, - MaxReplicas: 20, - Behavior: &v2beta2.HorizontalPodAutoscalerBehavior{ - ScaleUp: &v2beta2.HPAScalingRules{ - StabilizationWindowSeconds: &stabilizationWindowSeconds, - }, - }, - Metrics: []v2beta2.MetricSpec{ - { - Type: v2beta2.ResourceMetricSourceType, - Resource: &v2beta2.ResourceMetricSource{ - Name: corev1.ResourceCPU, - Target: v2beta2.MetricTarget{ - Type: v2beta2.UtilizationMetricType, - AverageUtilization: &cpuAverageUtilization, - }, - }, - }, - { - Type: v2beta2.ResourceMetricSourceType, - Resource: &v2beta2.ResourceMetricSource{ - Name: corev1.ResourceMemory, - Target: v2beta2.MetricTarget{ - Type: v2beta2.UtilizationMetricType, - AverageUtilization: &memAverageUtilization, - }, - }, - }, - }, - }, - }, - Storage: &options.StorageOptions{ - BlockManager: &options.BlockManagerOptions{ - Enable: &ptrue, - ServiceAccountName: "whizard-controller-manager", - BlockSyncInterval: &metav1.Duration{Duration: time.Minute}, - CommonOptions: options.CommonOptions{ - Image: "kubesphere/thanos:v0.29.5", - Replicas: &replicas1, - }, - GC: &options.BlockGCOptions{ - Enable: &ptrue, - Image: "kubesphere/whizard-monitoring-block-manager:v0.5.0", - }, - }, - }, - }, } return conf, nil } @@ -308,7 +30,7 @@ func saveTestConfig(t *testing.T, conf *Config) { 
if err != nil { t.Fatalf("error marshal config. %v", err) } - err = ioutil.WriteFile(fmt.Sprintf("%s.yaml", defaultConfigurationName), content, 0640) + err = os.WriteFile(fmt.Sprintf("%s.yaml", defaultConfigurationName), content, 0640) if err != nil { t.Fatalf("error write configuration file, %v", err) } diff --git a/pkg/controllers/monitoring/compactor_controller.go b/pkg/controllers/monitoring/compactor_controller.go index 473a0010..9f60abb5 100644 --- a/pkg/controllers/monitoring/compactor_controller.go +++ b/pkg/controllers/monitoring/compactor_controller.go @@ -19,9 +19,9 @@ package monitoring import ( "context" + "github.com/imdario/mergo" monitoringv1alpha1 "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/constants" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources/compactor" "github.com/kubesphere/whizard/pkg/util" @@ -41,8 +41,6 @@ import ( // CompactorReconciler reconciles a compactor object type CompactorReconciler struct { - DefaulterValidator CompactorDefaulterValidator - Options *options.CompactorOptions client.Client Scheme *runtime.Scheme Context context.Context @@ -84,6 +82,15 @@ func (r *CompactorReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, nil } + service := &monitoringv1alpha1.Service{} + if err := r.Get(ctx, *util.ServiceNamespacedName(&instance.ObjectMeta), service); err != nil { + return ctrl.Result{}, err + } + + if _, err := r.applyConfigurationFromCompactorTemplateSpec(instance, resources.ApplyDefaults(service).Spec.CompactorTemplateSpec); err != nil { + return ctrl.Result{}, err + } + if instance.GetDeletionTimestamp().IsZero() { if len(instance.Finalizers) == 0 { instance.Finalizers = append(instance.Finalizers, constants.FinalizerDeletePVC) @@ -97,7 +104,6 @@ func (r *CompactorReconciler) 
Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, r.Client.Update(r.Context, instance) } - r.DefaulterValidator(instance) if len(instance.Spec.Tenants) == 0 { klog.V(3).Infof("ignore compactor %s/%s because of empty tenants", instance.Name, instance.Namespace) return ctrl.Result{}, nil @@ -110,7 +116,7 @@ func (r *CompactorReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( Context: ctx, } - compactorReconciler, err := compactor.New(baseReconciler, instance, r.Options) + compactorReconciler, err := compactor.New(baseReconciler, instance) if err != nil { return ctrl.Result{}, err } @@ -153,10 +159,9 @@ func (r *CompactorReconciler) mapFuncBySelectorFunc(fn func(metav1.Object) map[s } } -type CompactorDefaulterValidator func(compactor *monitoringv1alpha1.Compactor) +func (r *CompactorReconciler) applyConfigurationFromCompactorTemplateSpec(compactor *monitoringv1alpha1.Compactor, compactorTemplateSpec monitoringv1alpha1.CompactorTemplateSpec) (*monitoringv1alpha1.Compactor, error) { -func CreateCompactorDefaulterValidator(opt *options.CompactorOptions) CompactorDefaulterValidator { - return func(compactor *monitoringv1alpha1.Compactor) { - opt.Override(&compactor.Spec) - } + err := mergo.Merge(&compactor.Spec, compactorTemplateSpec.CompactorSpec) + + return compactor, err } diff --git a/pkg/controllers/monitoring/gateway_controller.go b/pkg/controllers/monitoring/gateway_controller.go index cfc16704..8c556afe 100644 --- a/pkg/controllers/monitoring/gateway_controller.go +++ b/pkg/controllers/monitoring/gateway_controller.go @@ -19,9 +19,9 @@ package monitoring import ( "context" + "github.com/imdario/mergo" monitoringv1alpha1 "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/constants" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" 
"github.com/kubesphere/whizard/pkg/controllers/monitoring/resources/gateway" "github.com/kubesphere/whizard/pkg/util" @@ -46,7 +46,6 @@ type GatewayReconciler struct { client.Client Scheme *runtime.Scheme Context context.Context - Options *options.GatewayOptions } //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=gateways,verbs=get;list;watch;create;update;patch;delete @@ -87,7 +86,15 @@ func (r *GatewayReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct return ctrl.Result{}, nil } - instance = r.validator(instance) + service := &monitoringv1alpha1.Service{} + if err := r.Get(ctx, *util.ServiceNamespacedName(&instance.ObjectMeta), service); err != nil { + return ctrl.Result{}, err + } + + if _, err := r.applyConfigurationFromGatewayTemplateSpec(instance, resources.ApplyDefaults(service).Spec.GatewayTemplateSpec); err != nil { + return ctrl.Result{}, err + } + gatewayReconciler, err := gateway.New( resources.BaseReconciler{ Client: r.Client, @@ -161,7 +168,9 @@ func (r *GatewayReconciler) mapFuncBySelectorFunc(fn func(metav1.Object) map[str } } -func (r *GatewayReconciler) validator(g *monitoringv1alpha1.Gateway) *monitoringv1alpha1.Gateway { - r.Options.Override(&g.Spec) - return g +func (r *GatewayReconciler) applyConfigurationFromGatewayTemplateSpec(gateway *monitoringv1alpha1.Gateway, gatewayTemplateSpec monitoringv1alpha1.GatewaySpec) (*monitoringv1alpha1.Gateway, error) { + + err := mergo.Merge(&gateway.Spec, gatewayTemplateSpec) + + return gateway, err } diff --git a/pkg/controllers/monitoring/ingestor_controller.go b/pkg/controllers/monitoring/ingestor_controller.go index a8114508..80881701 100644 --- a/pkg/controllers/monitoring/ingestor_controller.go +++ b/pkg/controllers/monitoring/ingestor_controller.go @@ -18,19 +18,16 @@ package monitoring import ( "context" - "fmt" "reflect" "strconv" "time" - "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" + "github.com/imdario/mergo" monitoringv1alpha1 
"github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/constants" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources/ingester" "github.com/kubesphere/whizard/pkg/util" - "github.com/prometheus/common/model" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -46,11 +43,9 @@ import ( // IngesterReconciler reconciles a Ingester object type IngesterReconciler struct { - DefaulterValidator IngesterDefaulterValidator client.Client Scheme *runtime.Scheme Context context.Context - Options *options.IngesterOptions } //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=ingesters,verbs=get;list;watch;create;update;patch;delete @@ -83,10 +78,24 @@ func (r *IngesterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c return ctrl.Result{}, err } + if instance.Labels == nil || + instance.Labels[constants.ServiceLabelKey] == "" { + return ctrl.Result{}, nil + } + + service := &monitoringv1alpha1.Service{} + if err := r.Get(ctx, *util.ServiceNamespacedName(&instance.ObjectMeta), service); err != nil { + return ctrl.Result{}, err + } + + if _, err := r.applyConfigurationFromIngesterTemplateSpec(instance, resources.ApplyDefaults(service).Spec.IngesterTemplateSpec); err != nil { + return ctrl.Result{}, err + } + // Add spec.tenants to status.tenants, // so status.tenants will contain all tenants that have been configured. // When the Tenant object is deleted, it will be removed from status.tenants too. 
- var desiredStatus v1alpha1.IngesterStatus + var desiredStatus monitoringv1alpha1.IngesterStatus var tenantMap = make(map[string]struct{}, len(instance.Spec.Tenants)) for _, tenant := range instance.Spec.Tenants { tenantMap[tenant] = struct{}{} @@ -95,14 +104,14 @@ func (r *IngesterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c for _, tenant := range instance.Status.Tenants { tenantStatusMap[tenant.Name] = struct{}{} _, ok := tenantMap[tenant.Name] - desiredStatus.Tenants = append(desiredStatus.Tenants, v1alpha1.IngesterTenantStatus{ + desiredStatus.Tenants = append(desiredStatus.Tenants, monitoringv1alpha1.IngesterTenantStatus{ Name: tenant.Name, Obsolete: !ok, }) } for _, tenant := range instance.Spec.Tenants { if _, ok := tenantStatusMap[tenant]; !ok { - desiredStatus.Tenants = append(desiredStatus.Tenants, v1alpha1.IngesterTenantStatus{Name: tenant, Obsolete: false}) + desiredStatus.Tenants = append(desiredStatus.Tenants, monitoringv1alpha1.IngesterTenantStatus{Name: tenant, Obsolete: false}) } } if !reflect.DeepEqual(desiredStatus, instance.Status) { @@ -127,16 +136,6 @@ func (r *IngesterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c } } - instance, err = r.DefaulterValidator(instance) - if err != nil { - return ctrl.Result{}, err - } - - if instance.Labels == nil || - instance.Labels[constants.ServiceLabelKey] == "" { - return ctrl.Result{}, nil - } - baseReconciler := resources.BaseReconciler{ Client: r.Client, Log: l, @@ -144,7 +143,7 @@ func (r *IngesterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c Context: ctx, } - ingesterReconciler, err := ingester.New(baseReconciler, instance, r.Options) + ingesterReconciler, err := ingester.New(baseReconciler, instance) if err != nil { return ctrl.Result{}, err } @@ -187,21 +186,9 @@ func (r *IngesterReconciler) mapFuncBySelectorFunc(fn func(metav1.Object) map[st } } -type IngesterDefaulterValidator func(ingester *monitoringv1alpha1.Ingester) 
(*monitoringv1alpha1.Ingester, error) +func (r *IngesterReconciler) applyConfigurationFromIngesterTemplateSpec(ingester *monitoringv1alpha1.Ingester, ingesterTemplateSpec monitoringv1alpha1.IngesterTemplateSpec) (*monitoringv1alpha1.Ingester, error) { -func CreateIngesterDefaulterValidator(opt *options.IngesterOptions) IngesterDefaulterValidator { + err := mergo.Merge(&ingester.Spec, ingesterTemplateSpec.IngesterSpec) - return func(ingester *monitoringv1alpha1.Ingester) (*monitoringv1alpha1.Ingester, error) { - - opt.Override(&ingester.Spec) - - if ingester.Spec.LocalTsdbRetention != "" { - _, err := model.ParseDuration(ingester.Spec.LocalTsdbRetention) - if err != nil { - return nil, fmt.Errorf("invalid localTsdbRetention: %v", err) - } - } - - return ingester, nil - } + return ingester, err } diff --git a/pkg/controllers/monitoring/options/common.go b/pkg/controllers/monitoring/options/common.go deleted file mode 100644 index 59ec8e19..00000000 --- a/pkg/controllers/monitoring/options/common.go +++ /dev/null @@ -1,242 +0,0 @@ -package options - -import ( - "fmt" - - "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" - "github.com/spf13/pflag" - corev1 "k8s.io/api/core/v1" -) - -type CommonOptions struct { - Image string `json:"image,omitempty" yaml:"image,omitempty"` - ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" yaml:"imagePullPolicy,omitempty"` - ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty" yaml:"imagePullSecrets,omitempty"` - Affinity *corev1.Affinity `json:"affinity,omitempty" yaml:"affinity,omitempty"` - NodeSelector map[string]string `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty"` - Tolerations []corev1.Toleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty"` - Resources corev1.ResourceRequirements `json:"resources,omitempty" yaml:"resources,omitempty"` - SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty" 
yaml:"securityContext,omitempty"` - Replicas *int32 `json:"replicas,omitempty" yaml:"replicas,omitempty"` - LogLevel string `json:"logLevel,omitempty" yaml:"logLevel,omitempty"` - LogFormat string `json:"logFormat,omitempty" yaml:"logFormat,omitempty"` - Flags []string `json:"flags,omitempty" yaml:"flags,omitempty"` -} - -func NewCommonOptions() CommonOptions { - var replicas int32 = 1 - - return CommonOptions{ - Image: DefaultWhizardImage, - Replicas: &replicas, - } -} -func (o *CommonOptions) Validate() []error { - var errs []error - - if o.Replicas != nil && *o.Replicas < 0 { - errs = append(errs, fmt.Errorf("replicas must be >= 0")) - } - - return errs -} - -func (o *CommonOptions) ApplyTo(options *CommonOptions) *CommonOptions { - if o.Image != "" { - options.Image = o.Image - } - - if o.ImagePullPolicy != "" { - options.ImagePullPolicy = o.ImagePullPolicy - } - - if o.ImagePullSecrets != nil && len(o.ImagePullSecrets) > 0 { - options.ImagePullSecrets = o.ImagePullSecrets - } - - if o.Affinity != nil { - options.Affinity = o.Affinity - } - - if o.Tolerations != nil { - options.Tolerations = o.Tolerations - } - - if o.NodeSelector != nil { - options.NodeSelector = o.NodeSelector - } - - if o.Resources.Limits != nil { - if options.Resources.Limits == nil { - options.Resources.Limits = o.Resources.Limits - } else { - // mergo.Map(options.Resources.Limits, o.Resources.Limits, mergo.WithOverride) - - if !o.Resources.Limits.Cpu().IsZero() { - options.Resources.Limits[corev1.ResourceCPU] = o.Resources.Limits[corev1.ResourceCPU] - } - if !o.Resources.Limits.Memory().IsZero() { - options.Resources.Limits[corev1.ResourceMemory] = o.Resources.Limits[corev1.ResourceMemory] - } - - } - } - - if o.Resources.Requests != nil { - if options.Resources.Requests == nil { - options.Resources.Requests = o.Resources.Requests - } else { - //mergo.Map(options.Resources.Requests, o.Resources.Requests, mergo.WithOverride) - - if !o.Resources.Requests.Cpu().IsZero() { - 
options.Resources.Requests[corev1.ResourceCPU] = o.Resources.Requests[corev1.ResourceCPU] - } - if !o.Resources.Requests.Memory().IsZero() { - options.Resources.Requests[corev1.ResourceMemory] = o.Resources.Requests[corev1.ResourceMemory] - } - - } - } - - if o.SecurityContext != nil { - options.SecurityContext = o.SecurityContext - } - - if o.Replicas != nil { - options.Replicas = o.Replicas - } - - if o.LogLevel != "" { - options.LogLevel = o.LogLevel - } - - if o.LogFormat != "" { - options.LogFormat = o.LogFormat - } - - if o.Flags != nil { - options.Flags = o.Flags - } - return options -} - -// Override the Options overrides the spec field when it is empty -func (o *CommonOptions) Override(spec *v1alpha1.CommonSpec) { - if spec.Image == "" { - spec.Image = o.Image - } - - if spec.ImagePullPolicy == "" { - spec.ImagePullPolicy = o.ImagePullPolicy - } - - if spec.ImagePullSecrets == nil || len(spec.ImagePullSecrets) == 0 { - spec.ImagePullSecrets = o.ImagePullSecrets - } - - if spec.Replicas == nil || *spec.Replicas < 0 { - spec.Replicas = o.Replicas - } - - if spec.Affinity == nil { - spec.Affinity = o.Affinity - } - - if spec.Tolerations == nil { - spec.Tolerations = o.Tolerations - } - - if spec.NodeSelector == nil { - spec.NodeSelector = o.NodeSelector - } - - if len(spec.Resources.Limits) == 0 { - spec.Resources.Limits = o.Resources.Limits - } - - if len(spec.Resources.Requests) == 0 { - spec.Resources.Requests = o.Resources.Requests - } - - if spec.SecurityContext == nil { - spec.SecurityContext = o.SecurityContext - } - - if spec.LogLevel == "" { - spec.LogLevel = o.LogLevel - } - - if spec.LogFormat == "" { - spec.LogFormat = o.LogFormat - } - - if spec.Flags == nil { - spec.Flags = o.Flags - } -} - -func (o *CommonOptions) AddFlags(fs *pflag.FlagSet, c *CommonOptions, prefix string) { - fs.StringVar(&c.Image, prefix+".image", c.Image, "Image with tag/version.") - fs.StringArrayVar(&c.Flags, prefix+".flags", c.Flags, "Flags with --flag=value.") -} - 
-type SidecarOptions struct { - // Image is the envoy image with tag/version - Image string `json:"image,omitempty" yaml:"image,omitempty"` - - // Define resources requests and limits for envoy container. - Resources corev1.ResourceRequirements `json:"resources,omitempty" yaml:"resources,omitempty"` -} - -func (o *SidecarOptions) AddFlags(fs *pflag.FlagSet, c *SidecarOptions, prefix string) { - fs.StringVar(&c.Image, prefix+".image", c.Image, "Image with tag/version.") -} - -func (o *SidecarOptions) ApplyTo(options *SidecarOptions) *SidecarOptions { - if o.Image != "" { - options.Image = o.Image - } - if o.Resources.Limits != nil { - if options.Resources.Limits == nil { - options.Resources.Limits = o.Resources.Limits - } else { - // mergo.Map(options.Resources.Limits, o.Resources.Limits, mergo.WithOverride) - if !o.Resources.Limits.Cpu().IsZero() { - options.Resources.Limits[corev1.ResourceCPU] = o.Resources.Limits[corev1.ResourceCPU] - } - if !o.Resources.Limits.Memory().IsZero() { - options.Resources.Limits[corev1.ResourceMemory] = o.Resources.Limits[corev1.ResourceMemory] - } - } - } - - if o.Resources.Requests != nil { - if options.Resources.Requests == nil { - //options.Resources.Requests = o.Resources.Requests - - } else { - //mergo.Map(options.Resources.Requests, o.Resources.Requests, mergo.WithOverride) - if !o.Resources.Requests.Cpu().IsZero() { - options.Resources.Requests[corev1.ResourceCPU] = o.Resources.Requests[corev1.ResourceCPU] - } - if !o.Resources.Requests.Memory().IsZero() { - options.Resources.Requests[corev1.ResourceMemory] = o.Resources.Requests[corev1.ResourceMemory] - } - } - } - return options -} - -// Override the Options overrides the spec field when it is empty -func (o *SidecarOptions) Override(spec *v1alpha1.SidecarSpec) { - if spec.Image == "" { - spec.Image = o.Image - } - if spec.Resources.Limits == nil { - spec.Resources.Limits = o.Resources.Limits - } - - if spec.Resources.Requests == nil { - spec.Resources.Requests = 
o.Resources.Requests - } -} diff --git a/pkg/controllers/monitoring/options/common_test.go b/pkg/controllers/monitoring/options/common_test.go deleted file mode 100644 index 7a364069..00000000 --- a/pkg/controllers/monitoring/options/common_test.go +++ /dev/null @@ -1,185 +0,0 @@ -package options - -import ( - "reflect" - "testing" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" -) - -func TestCommonOptionsApplyTo(t *testing.T) { - var replicas1 int32 = 1 - var replicas2 int32 = 2 - - testCases := []struct { - name string - options CommonOptions - conf CommonOptions - want *CommonOptions - }{ - { - "good case 1", - NewCommonOptions(), - CommonOptions{}, - &CommonOptions{ - Image: DefaultWhizardImage, - Replicas: &replicas1, - }, - }, - { - "good case 2", - NewCommonOptions(), - CommonOptions{ - Image: "thanos/thanos:v0.28.0", - Replicas: &replicas2, - SecurityContext: &corev1.PodSecurityContext{ - FSGroup: &[]int64{0}[0], - }, - }, - &CommonOptions{ - Image: "thanos/thanos:v0.28.0", - Replicas: &replicas2, - SecurityContext: &corev1.PodSecurityContext{ - FSGroup: &[]int64{0}[0], - }, - }, - }, - { - "good case 3", - CommonOptions{ - Image: DefaultWhizardImage, - Replicas: &replicas2, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("1"), - corev1.ResourceMemory: resource.MustParse("4Gi"), - }, - }, - }, - CommonOptions{ - Image: "thanos/thanos:v0.28.0", - Replicas: &replicas1, - Resources: corev1.ResourceRequirements{ - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("2"), - corev1.ResourceMemory: resource.MustParse("8Gi"), - }, - }, - }, - &CommonOptions{ - Image: "thanos/thanos:v0.28.0", - Replicas: &replicas1, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: 
resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("2"), - corev1.ResourceMemory: resource.MustParse("8Gi"), - }, - }, - }, - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got := tt.conf.ApplyTo(&tt.options) - - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("\nget = %+v, \nwant %+v", got, tt.want) - } - }) - } -} - -func TestSidecarOptionsApplyTo(t *testing.T) { - - testCases := []struct { - name string - options SidecarOptions - conf SidecarOptions - want *SidecarOptions - }{ - { - "good case 1", - SidecarOptions{ - Image: DefaultEnvoyImage, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - }}, - SidecarOptions{}, - &SidecarOptions{ - Image: DefaultEnvoyImage, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - }, - }, - }, - { - "good case 2", - SidecarOptions{ - Image: DefaultRulerWriteProxyImage, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("50Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("200m"), - corev1.ResourceMemory: resource.MustParse("400Mi"), - }, - }, - }, - SidecarOptions{ - Resources: corev1.ResourceRequirements{ - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("200m"), - 
corev1.ResourceMemory: resource.MustParse("2Gi"), - }, - }, - }, - &SidecarOptions{ - Image: DefaultRulerWriteProxyImage, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("50Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("200m"), - corev1.ResourceMemory: resource.MustParse("2Gi"), - }, - }, - }, - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got := tt.conf.ApplyTo(&tt.options) - - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("\nget = %+v, \nwant %+v", got, tt.want) - } - }) - } -} diff --git a/pkg/controllers/monitoring/options/component.go b/pkg/controllers/monitoring/options/component.go deleted file mode 100644 index 78a3d8c3..00000000 --- a/pkg/controllers/monitoring/options/component.go +++ /dev/null @@ -1,962 +0,0 @@ -package options - -import ( - "time" - - "github.com/imdario/mergo" - "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" - "github.com/spf13/pflag" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type CompactorOptions struct { - CommonOptions `json:",inline" yaml:",inline" mapstructure:",squash"` - - DataVolume *v1alpha1.KubernetesVolume `json:"dataVolume,omitempty" yaml:"dataVolume,omitempty"` - - DefaultTenantsPerCompactor int `json:"defaultTenantsPerCompactor,omitempty" yaml:"defaultTenantsPerCompactor,omitempty"` - // DisableDownsampling specifies whether to disable downsampling - DisableDownsampling *bool `json:"disableDownsampling,omitempty" yaml:"disableDownsampling,omitempty"` - // Retention configs how long to retain samples - Retention *v1alpha1.Retention `json:"retention,omitempty" yaml:"retention,omitempty"` -} - -func NewCompactorOptions() *CompactorOptions { - return &CompactorOptions{ - CommonOptions: NewCommonOptions(), - DefaultTenantsPerCompactor: 
DefaultTenantsPerCompactor, - } -} - -func (o *CompactorOptions) AddFlags(fs *pflag.FlagSet, c *CompactorOptions) { - o.CommonOptions.AddFlags(fs, &c.CommonOptions, "compactor") - fs.IntVar(&c.DefaultTenantsPerCompactor, "default-tenants-per-compactor", c.DefaultTenantsPerCompactor, "Number of tenants processed per compactor") -} - -func (o *CompactorOptions) ApplyTo(options *CompactorOptions) { - o.CommonOptions.ApplyTo(&options.CommonOptions) - - if o.DefaultTenantsPerCompactor != 0 { - options.DefaultTenantsPerCompactor = o.DefaultTenantsPerCompactor - } - - if o.DataVolume != nil { - if options.DataVolume == nil { - options.DataVolume = o.DataVolume - } else { - if o.DataVolume.PersistentVolumeClaim != nil { - options.DataVolume.PersistentVolumeClaim = o.DataVolume.PersistentVolumeClaim - } - if o.DataVolume.PersistentVolumeClaimRetentionPolicy != nil { - options.DataVolume.PersistentVolumeClaimRetentionPolicy = o.DataVolume.PersistentVolumeClaimRetentionPolicy - } - if o.DataVolume.EmptyDir != nil { - options.DataVolume.EmptyDir = o.DataVolume.EmptyDir - } - } - } - - if o.DisableDownsampling != nil { - options.DisableDownsampling = o.DisableDownsampling - } - - if o.Retention != nil { - if options.Retention == nil { - options.Retention = o.Retention - } else { - mergo.Merge(options.Retention, o.Retention, mergo.WithOverride) - } - } -} - -// Override the Options overrides the spec field when it is empty -func (o *CompactorOptions) Override(spec *v1alpha1.CompactorSpec) { - o.CommonOptions.Override(&spec.CommonSpec) - - if spec.DataVolume == nil { - spec.DataVolume = o.DataVolume - } - if spec.DisableDownsampling == nil { - spec.DisableDownsampling = o.DisableDownsampling - } -} - -func (o *CompactorOptions) Validate() []error { - var errs []error - - errs = append(errs, o.CommonOptions.Validate()...) 
- - return errs -} - -type IngesterOptions struct { - CommonOptions `json:",inline" yaml:",inline" mapstructure:",squash"` - - DataVolume *v1alpha1.KubernetesVolume `json:"dataVolume,omitempty" yaml:"dataVolume,omitempty"` - - DefaultTenantsPerIngester int `json:"defaultTenantsPerIngester,omitempty" yaml:"defaultTenantsPerIngester,omitempty"` - - // DefaultIngesterRetentionPeriod Whizard default ingester retention period when it has no tenant. - DefaultIngesterRetentionPeriod time.Duration `json:"defaultIngesterRetentionPeriod,omitempty" yaml:"defaultIngesterRetentionPeriod,omitempty"` - - // LocalTsdbRetention configs how long to retain raw samples on local storage. - LocalTsdbRetention string `json:"localTsdbRetention,omitempty"` - - // Disable the TSDB cleanup of ingester. - // The cleanup will delete the blocks that belong to deleted tenants - // in the data directory of ingester TSDB. - DisableTSDBCleanup *bool `json:"disableTSDBCleanup,omitempty"` - TSDBCleanupImage string `json:"tsdbCleanupImage,omitempty"` -} - -func NewIngesterOptions() *IngesterOptions { - return &IngesterOptions{ - CommonOptions: NewCommonOptions(), - - DefaultTenantsPerIngester: DefaultTenantsPerIngester, - DefaultIngesterRetentionPeriod: DefaultIngesterRetentionPeriod, - TSDBCleanupImage: DefaultTSDBCleanupImage, - } -} - -func (o *IngesterOptions) AddFlags(fs *pflag.FlagSet, io *IngesterOptions) { - o.CommonOptions.AddFlags(fs, &io.CommonOptions, "ingester") - - fs.IntVar(&io.DefaultTenantsPerIngester, "defaultTenantsPerIngester", io.DefaultTenantsPerIngester, "Whizard default tenant count per ingester.") - fs.DurationVar(&io.DefaultIngesterRetentionPeriod, "defaultIngesterRetentionPeriod", io.DefaultIngesterRetentionPeriod, "Whizard default ingester retention period when it has no tenant.") -} - -func (o *IngesterOptions) ApplyTo(options *IngesterOptions) { - o.CommonOptions.ApplyTo(&options.CommonOptions) - - if o.DataVolume != nil { - if options.DataVolume == nil { - 
options.DataVolume = o.DataVolume - } else { - if o.DataVolume.PersistentVolumeClaim != nil { - options.DataVolume.PersistentVolumeClaim = o.DataVolume.PersistentVolumeClaim - } - if o.DataVolume.PersistentVolumeClaimRetentionPolicy != nil { - options.DataVolume.PersistentVolumeClaimRetentionPolicy = o.DataVolume.PersistentVolumeClaimRetentionPolicy - } - if o.DataVolume.EmptyDir != nil { - options.DataVolume.EmptyDir = o.DataVolume.EmptyDir - } - } - } - - if o.DefaultTenantsPerIngester != 0 { - options.DefaultTenantsPerIngester = o.DefaultTenantsPerIngester - } - if o.DefaultIngesterRetentionPeriod != 0 { - options.DefaultIngesterRetentionPeriod = o.DefaultIngesterRetentionPeriod - } - if o.LocalTsdbRetention != "" { - options.LocalTsdbRetention = o.LocalTsdbRetention - } - if o.DisableTSDBCleanup != nil { - options.DisableTSDBCleanup = o.DisableTSDBCleanup - } - if o.TSDBCleanupImage != "" { - options.TSDBCleanupImage = o.TSDBCleanupImage - } -} - -// Override the Options overrides the spec field when it is empty -func (o *IngesterOptions) Override(spec *v1alpha1.IngesterSpec) { - o.CommonOptions.Override(&spec.CommonSpec) - - if spec.DataVolume == nil { - spec.DataVolume = o.DataVolume - } - if spec.LocalTsdbRetention == "" { - spec.LocalTsdbRetention = o.LocalTsdbRetention - } - -} - -func (o *IngesterOptions) Validate() []error { - var errs []error - - errs = append(errs, o.CommonOptions.Validate()...) 
- - return errs -} - -type GatewayOptions struct { - CommonOptions `json:",inline" yaml:",inline" mapstructure:",squash"` - - // Secret name for HTTP Server certificate (Kubernetes TLS secret type) - ServerCertificate string `json:"serverCertificate,omitempty"` - // Secret name for HTTP Client CA certificate (Kubernetes TLS secret type) - ClientCACertificate string `json:"clientCaCertificate,omitempty"` - - NodePort int32 `json:"nodePort,omitempty"` -} - -func NewGatewayOptions() *GatewayOptions { - var replicas int32 = 1 - - return &GatewayOptions{ - CommonOptions: CommonOptions{ - Image: DefaultGatewayImage, - Replicas: &replicas, - }, - } -} - -func (o *GatewayOptions) ApplyTo(options *GatewayOptions) { - o.CommonOptions.ApplyTo(&options.CommonOptions) - - if o.NodePort == 0 { - options.NodePort = o.NodePort - } - if o.ClientCACertificate != "" { - options.ClientCACertificate = o.ClientCACertificate - } - if o.ServerCertificate != "" { - options.ServerCertificate = o.ServerCertificate - } -} - -func (o *GatewayOptions) Override(spec *v1alpha1.GatewaySpec) { - o.CommonOptions.Override(&spec.CommonSpec) - if spec.NodePort == 0 { - spec.NodePort = o.NodePort - } - if spec.ServerCertificate != "" { - spec.ServerCertificate = o.ServerCertificate - } - if spec.ClientCACertificate != "" { - spec.ClientCACertificate = o.ClientCACertificate - } -} - -func (o *GatewayOptions) Validate() []error { - var errs []error - - errs = append(errs, o.CommonOptions.Validate()...) 
- - return errs -} - -func (o *GatewayOptions) AddFlags(fs *pflag.FlagSet, g *GatewayOptions) { - o.CommonOptions.AddFlags(fs, &g.CommonOptions, "gateway") -} - -type QueryOptions struct { - CommonOptions `json:",inline" yaml:",inline" mapstructure:",squash"` - - Envoy *SidecarOptions `json:"envoy,omitempty" yaml:"envoy,omitempty"` - - // Additional StoreApi servers from which Query component queries from - Stores []v1alpha1.QueryStores `json:"stores,omitempty"` - // Selector labels that will be exposed in info endpoint. - SelectorLabels map[string]string `json:"selectorLabels,omitempty"` - // Labels to treat as a replica indicator along which data is deduplicated. - ReplicaLabelNames []string `json:"replicaLabelNames,omitempty"` -} - -func NewQueryOptions() *QueryOptions { - return &QueryOptions{ - CommonOptions: NewCommonOptions(), - Envoy: &SidecarOptions{ - Image: DefaultEnvoyImage, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("50m"), - corev1.ResourceMemory: resource.MustParse("50Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("2"), - corev1.ResourceMemory: resource.MustParse("1Gi"), - }, - }, - }, - } -} - -func (o *QueryOptions) ApplyTo(options *QueryOptions) { - o.CommonOptions.ApplyTo(&options.CommonOptions) - o.Envoy.ApplyTo(options.Envoy) - - if o.Stores != nil { - options.Stores = o.Stores - } - if o.SelectorLabels != nil { - options.SelectorLabels = o.SelectorLabels - } - if o.ReplicaLabelNames != nil { - options.ReplicaLabelNames = o.ReplicaLabelNames - } -} - -// Override the Options overrides the spec field when it is empty -func (o *QueryOptions) Override(spec *v1alpha1.QuerySpec) { - o.CommonOptions.Override(&spec.CommonSpec) - o.Envoy.Override(&spec.Envoy) - - if spec.Stores == nil { - spec.Stores = o.Stores - } - if spec.SelectorLabels == nil { - spec.SelectorLabels = o.SelectorLabels - } - if spec.ReplicaLabelNames == nil { - 
spec.ReplicaLabelNames = o.ReplicaLabelNames - } -} - -func (o *QueryOptions) Validate() []error { - var errs []error - - errs = append(errs, o.CommonOptions.Validate()...) - - return errs -} - -func (o *QueryOptions) AddFlags(fs *pflag.FlagSet, qo *QueryOptions) { - o.CommonOptions.AddFlags(fs, &qo.CommonOptions, "query") - o.Envoy.AddFlags(fs, qo.Envoy, "query.envoy") -} - -type QueryFrontendOptions struct { - CommonOptions `json:",inline" yaml:",inline" mapstructure:",squash"` - - Envoy *SidecarOptions `json:"envoy,omitempty" yaml:"envoy,omitempty"` - - CacheConfig *v1alpha1.ResponseCacheProviderConfig `json:"cacheConfig,omitempty" yaml:"cacheConfig,omitempty"` -} - -func NewQueryFrontendOptions() *QueryFrontendOptions { - return &QueryFrontendOptions{ - CommonOptions: NewCommonOptions(), - Envoy: &SidecarOptions{ - Image: DefaultEnvoyImage, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("50m"), - corev1.ResourceMemory: resource.MustParse("50Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("2"), - corev1.ResourceMemory: resource.MustParse("1Gi"), - }, - }, - }, - } -} - -func (o *QueryFrontendOptions) ApplyTo(options *QueryFrontendOptions) { - o.CommonOptions.ApplyTo(&options.CommonOptions) - o.Envoy.ApplyTo(options.Envoy) - if o.CacheConfig != nil { - options.CacheConfig = o.CacheConfig - } -} - -// Override the Options overrides the spec field when it is empty -func (o *QueryFrontendOptions) Override(spec *v1alpha1.QueryFrontendSpec) { - o.CommonOptions.Override(&spec.CommonSpec) - o.Envoy.Override(&spec.Envoy) - if spec.CacheConfig == nil { - spec.CacheConfig = o.CacheConfig - } -} - -func (o *QueryFrontendOptions) Validate() []error { - var errs []error - - errs = append(errs, o.CommonOptions.Validate()...) 
- - return errs -} - -func (o *QueryFrontendOptions) AddFlags(fs *pflag.FlagSet, qfo *QueryFrontendOptions) { - o.CommonOptions.AddFlags(fs, &qfo.CommonOptions, "query-frontend") -} - -type RouterOptions struct { - CommonOptions `json:",inline" yaml:",inline" mapstructure:",squash"` - Envoy *SidecarOptions `json:"envoy,omitempty" yaml:"envoy,omitempty"` - // How many times to replicate incoming write requests - ReplicationFactor *uint64 `json:"replicationFactor,omitempty"` -} - -func NewRouterOptions() *RouterOptions { - var factor uint64 = DefaultRouterReplicationFactor - return &RouterOptions{ - CommonOptions: NewCommonOptions(), - - Envoy: &SidecarOptions{ - Image: DefaultEnvoyImage, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("50m"), - corev1.ResourceMemory: resource.MustParse("50Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("2"), - corev1.ResourceMemory: resource.MustParse("1Gi"), - }, - }, - }, - ReplicationFactor: &factor, - } -} - -func (o *RouterOptions) ApplyTo(options *RouterOptions) { - o.CommonOptions.ApplyTo(&options.CommonOptions) - o.Envoy.ApplyTo(options.Envoy) - - if o.ReplicationFactor != nil { - options.ReplicationFactor = o.ReplicationFactor - } -} - -// Override the Options overrides the spec field when it is empty -func (o *RouterOptions) Override(spec *v1alpha1.RouterSpec) { - o.CommonOptions.Override(&spec.CommonSpec) - o.Envoy.Override(&spec.Envoy) - - if spec.ReplicationFactor == nil { - spec.ReplicationFactor = o.ReplicationFactor - } -} - -func (o *RouterOptions) Validate() []error { - var errs []error - - errs = append(errs, o.CommonOptions.Validate()...) 
- - return errs -} - -func (o *RouterOptions) AddFlags(fs *pflag.FlagSet, ro *RouterOptions) { - var factor uint64 - o.CommonOptions.AddFlags(fs, &ro.CommonOptions, "router") - fs.Uint64Var(&factor, "router.replicationFactor", *ro.ReplicationFactor, "Whizard router replication factor.") - - ro.ReplicationFactor = &factor -} - -type RulerOptions struct { - CommonOptions `json:",inline" yaml:",inline" mapstructure:",squash"` - - DataVolume *v1alpha1.KubernetesVolume `json:"dataVolume,omitempty" yaml:"dataVolume,omitempty"` - - PrometheusConfigReloader SidecarOptions `json:"prometheusConfigReloader,omitempty" yaml:"prometheusConfigReloader,omitempty"` - RulerQueryProxy SidecarOptions `json:"rulerQueryProxy" yaml:"rulerQueryProxy,omitempty"` - RulerWriteProxy SidecarOptions `json:"rulerWriteProxy" yaml:"rulerWriteProxy,omitempty"` - Envoy *SidecarOptions `json:"envoy,omitempty" yaml:"envoy,omitempty"` - - // Number of shards to take the hash of fully qualified name of the rule group in order to split rules. - // Each shard of rules will be bound to one separate statefulset. - Shards *int32 `json:"shards,omitempty"` - // Label selectors to select which PrometheusRules to mount for alerting and recording. - // The result of multiple selectors are ORed. - RuleSelectors []*metav1.LabelSelector `json:"ruleSelectors,omitempty"` - // Namespaces to be selected for PrometheusRules discovery. If unspecified, only - // the same namespace as the Ruler object is in is used. - RuleNamespaceSelector *metav1.LabelSelector `json:"ruleNamespaceSelector,omitempty"` - // If true, the tenant ruler will not select alerting rules associated with this tenant(cluster) - DisableAlertingRulesAutoSelection *bool `json:"disableAlertingRulesAutoSelection,omitempty"` - - // Labels configure the external label pairs to Ruler. A default replica label - // `ruler_replica` will be always added as a label with the value of the pod's name and it will be dropped in the alerts. 
- Labels map[string]string `json:"labels,omitempty"` - // AlertDropLabels configure the label names which should be dropped in Ruler alerts. - // The replica label `ruler_replica` will always be dropped in alerts. - AlertDropLabels []string `json:"alertDropLabels,omitempty"` - // Define URLs to send alerts to Alertmanager. - // Note: this field will be ignored if AlertmanagersConfig is specified. - // Maps to the `alertmanagers.url` arg. - AlertmanagersURL []string `json:"alertmanagersUrl,omitempty"` - // Define configuration for connecting to alertmanager. Maps to the `alertmanagers.config` arg. - AlertmanagersConfig *corev1.SecretKeySelector `json:"alertmanagersConfig,omitempty"` - - // Interval between consecutive evaluations. - EvaluationInterval string `json:"evaluationInterval,omitempty"` -} - -func NewRulerOptions() *RulerOptions { - var shards int32 = DefaultRulerShards - return &RulerOptions{ - CommonOptions: NewCommonOptions(), - Shards: &shards, - EvaluationInterval: DefaultRulerEvaluationInterval, - - PrometheusConfigReloader: SidecarOptions{ - Image: DefaultPrometheusConfigReloaderImage, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("50Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("100Mi"), - }, - }, - }, - RulerQueryProxy: SidecarOptions{ - Image: DefaultGatewayImage, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("50Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("2"), - corev1.ResourceMemory: resource.MustParse("8Gi"), - }, - }, - }, - RulerWriteProxy: SidecarOptions{ - Image: DefaultRulerWriteProxyImage, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - 
corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("50Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("2"), - corev1.ResourceMemory: resource.MustParse("8Gi"), - }, - }, - }, - Envoy: &SidecarOptions{ - Image: DefaultEnvoyImage, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("50m"), - corev1.ResourceMemory: resource.MustParse("50Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("2"), - corev1.ResourceMemory: resource.MustParse("1Gi"), - }, - }, - }, - } -} - -func (o *RulerOptions) ApplyTo(options *RulerOptions) { - o.CommonOptions.ApplyTo(&options.CommonOptions) - o.Envoy.ApplyTo(options.Envoy) - o.PrometheusConfigReloader.ApplyTo(&options.PrometheusConfigReloader) - o.RulerQueryProxy.ApplyTo(&options.RulerQueryProxy) - o.RulerWriteProxy.ApplyTo(&options.RulerWriteProxy) - - if o.DataVolume != nil { - if options.DataVolume == nil { - options.DataVolume = o.DataVolume - } else { - if o.DataVolume.PersistentVolumeClaim != nil { - options.DataVolume.PersistentVolumeClaim = o.DataVolume.PersistentVolumeClaim - } - if o.DataVolume.PersistentVolumeClaimRetentionPolicy != nil { - options.DataVolume.PersistentVolumeClaimRetentionPolicy = o.DataVolume.PersistentVolumeClaimRetentionPolicy - } - if o.DataVolume.EmptyDir != nil { - options.DataVolume.EmptyDir = o.DataVolume.EmptyDir - } - } - } - - if *o.Shards != 0 { - options.Shards = o.Shards - } - if o.RuleSelectors != nil { - options.RuleSelectors = o.RuleSelectors - } - if o.RuleNamespaceSelector != nil { - options.RuleNamespaceSelector = o.RuleNamespaceSelector - } - if o.DisableAlertingRulesAutoSelection != nil { - options.DisableAlertingRulesAutoSelection = o.DisableAlertingRulesAutoSelection - } - if o.Labels != nil { - options.Labels = o.Labels - } - if o.AlertDropLabels != nil { - options.AlertDropLabels = o.AlertDropLabels - } - if 
o.AlertmanagersURL != nil { - options.AlertmanagersURL = o.AlertmanagersURL - } - if o.AlertmanagersConfig != nil { - options.AlertmanagersConfig = o.AlertmanagersConfig - } - if o.EvaluationInterval != "" { - options.EvaluationInterval = o.EvaluationInterval - } - -} - -// Override the Options overrides the spec field when it is empty -func (o *RulerOptions) Override(spec *v1alpha1.RulerSpec) { - o.CommonOptions.Override(&spec.CommonSpec) - o.Envoy.Override(&spec.Envoy) - o.PrometheusConfigReloader.Override(&spec.PrometheusConfigReloader) - o.RulerQueryProxy.Override(&spec.RulerQueryProxy) - - if spec.DataVolume == nil { - spec.DataVolume = o.DataVolume - } - - if spec.Shards == nil { - spec.Shards = o.Shards - } - if spec.RuleSelectors == nil { - spec.RuleSelectors = o.RuleSelectors - } - if spec.RuleNamespaceSelector == nil { - spec.RuleNamespaceSelector = o.RuleNamespaceSelector - } - if spec.Labels == nil { - spec.Labels = o.Labels - } - if spec.AlertDropLabels == nil { - spec.AlertDropLabels = o.AlertDropLabels - } - if spec.AlertmanagersURL == nil { - spec.AlertmanagersURL = o.AlertmanagersURL - } - if spec.AlertmanagersConfig == nil { - spec.AlertmanagersConfig = o.AlertmanagersConfig - } - if spec.EvaluationInterval == "" { - spec.EvaluationInterval = v1alpha1.Duration(o.EvaluationInterval) - } -} - -func (o *RulerOptions) Validate() []error { - var errs []error - - if _, err := time.ParseDuration(o.EvaluationInterval); err != nil { - errs = append(errs, err) - } - - errs = append(errs, o.CommonOptions.Validate()...) 
- - return errs -} - -func (o *RulerOptions) AddFlags(fs *pflag.FlagSet, ro *RulerOptions) { - o.CommonOptions.AddFlags(fs, &ro.CommonOptions, "ruler") - o.PrometheusConfigReloader.AddFlags(fs, &ro.PrometheusConfigReloader, "ruler.prometheus-config-reloader") - o.RulerQueryProxy.AddFlags(fs, &ro.RulerQueryProxy, "ruler.query-proxy") - o.RulerWriteProxy.AddFlags(fs, &ro.RulerWriteProxy, "ruler.write-proxy") -} - -type StoreOptions struct { - CommonOptions `json:",inline" yaml:",inline" mapstructure:",squash"` - - // MinTime specifies start of time range limit to serve - MinTime string `json:"minTime,omitempty" yaml:"minTime,omitempty"` - // MaxTime specifies end of time range limit to serve - MaxTime string `json:"maxTime,omitempty" yaml:"maxTime,omitempty"` - // TimeRanges is a list of TimeRange to partition Store. - TimeRanges []v1alpha1.TimeRange `json:"timeRanges,omitempty" yaml:"timeRanges,omitempty"` - DataVolume *v1alpha1.KubernetesVolume `json:"dataVolume,omitempty" yaml:"dataVolume,omitempty"` - IndexCacheConfig *v1alpha1.IndexCacheConfig `json:"indexCacheConfig,omitempty" yaml:"indexCacheConfig,omitempty"` - Scaler *v1alpha1.AutoScaler `json:"scaler,omitempty" yaml:"scaler,omitempty"` -} - -func NewStoreOptions() *StoreOptions { - var replicas int32 = DefaultStoreMinReplicas - - return &StoreOptions{ - CommonOptions: CommonOptions{ - Image: DefaultWhizardImage, - Replicas: &replicas, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("500Mi"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("1"), - corev1.ResourceMemory: resource.MustParse("4Gi"), - }, - }, - }, - } -} - -func (o *StoreOptions) ApplyTo(options *StoreOptions) { - o.CommonOptions.ApplyTo(&options.CommonOptions) - - if o.MinTime != "" { - options.MinTime = o.MinTime - } - if o.MaxTime != "" { - options.MaxTime = o.MaxTime - } - if len(o.TimeRanges) 
> 0 { - options.TimeRanges = o.TimeRanges - } - - if o.DataVolume != nil { - if options.DataVolume == nil { - options.DataVolume = o.DataVolume - } else { - if o.DataVolume.PersistentVolumeClaim != nil { - options.DataVolume.PersistentVolumeClaim = o.DataVolume.PersistentVolumeClaim - } - if o.DataVolume.PersistentVolumeClaimRetentionPolicy != nil { - options.DataVolume.PersistentVolumeClaimRetentionPolicy = o.DataVolume.PersistentVolumeClaimRetentionPolicy - } - if o.DataVolume.EmptyDir != nil { - options.DataVolume.EmptyDir = o.DataVolume.EmptyDir - } - } - } - - if o.IndexCacheConfig != nil { - if options.IndexCacheConfig == nil { - options.IndexCacheConfig = o.IndexCacheConfig - } - - if o.IndexCacheConfig.InMemoryIndexCacheConfig == nil { - if options.IndexCacheConfig.InMemoryIndexCacheConfig == nil { - options.IndexCacheConfig.InMemoryIndexCacheConfig = o.IndexCacheConfig.InMemoryIndexCacheConfig - } - - if o.IndexCacheConfig.InMemoryIndexCacheConfig.MaxSize != "" { - options.IndexCacheConfig.InMemoryIndexCacheConfig.MaxSize = o.IndexCacheConfig.InMemoryIndexCacheConfig.MaxSize - } - } - } - - if o.Scaler != nil { - if options.Scaler == nil { - options.Scaler = o.Scaler - } - - if o.Scaler.MinReplicas != nil && *o.Scaler.MinReplicas > 0 { - options.Scaler.MinReplicas = o.Scaler.MinReplicas - } - - if o.Scaler.MaxReplicas > 0 { - options.Scaler.MaxReplicas = o.Scaler.MaxReplicas - } - - if o.Scaler.Behavior != nil { - options.Scaler.Behavior = o.Scaler.Behavior - } - } -} - -// Override the Options overrides the spec field when it is empty -func (o *StoreOptions) Override(spec *v1alpha1.StoreSpec) { - o.CommonOptions.Override(&spec.CommonSpec) - - if spec.MinTime == "" { - spec.MinTime = o.MinTime - } - if spec.MaxTime == "" { - spec.MaxTime = o.MaxTime - } - if len(spec.TimeRanges) == 0 { - spec.TimeRanges = o.TimeRanges - } - - if spec.DataVolume == nil { - spec.DataVolume = o.DataVolume - } - if spec.IndexCacheConfig == nil { - spec.IndexCacheConfig = 
o.IndexCacheConfig - } else { - if spec.IndexCacheConfig.InMemoryIndexCacheConfig == nil { - spec.IndexCacheConfig.InMemoryIndexCacheConfig = o.IndexCacheConfig.InMemoryIndexCacheConfig - } else { - if spec.IndexCacheConfig.MaxSize == "" { - spec.IndexCacheConfig.MaxSize = o.IndexCacheConfig.MaxSize - } - } - } - - if spec.Scaler == nil { - spec.Scaler = o.Scaler - } else { - if spec.Scaler.MaxReplicas == 0 { - spec.Scaler.MaxReplicas = o.Scaler.MaxReplicas - } - - if spec.Scaler.MinReplicas == nil || *spec.Scaler.MinReplicas == 0 { - min := *o.Scaler.MinReplicas - spec.Scaler.MinReplicas = &min - } - - if spec.Scaler.Metrics == nil { - spec.Scaler.Metrics = o.Scaler.Metrics - } - - if spec.Scaler.Behavior == nil { - spec.Scaler.Behavior = o.Scaler.Behavior - } - } -} - -func (o *StoreOptions) Validate() []error { - var errs []error - - errs = append(errs, o.CommonOptions.Validate()...) - - return errs -} - -func (o *StoreOptions) AddFlags(fs *pflag.FlagSet, s *StoreOptions) { - o.CommonOptions.AddFlags(fs, &s.CommonOptions, "store") -} - -type StorageOptions struct { - BlockManager *BlockManagerOptions `json:"blockManager,omitempty"` -} - -type BlockManagerOptions struct { - Enable *bool `json:"enable,omitempty"` - CommonOptions `json:",inline" yaml:",inline" mapstructure:",squash"` - ServiceAccountName string `json:"serviceAccountName,omitempty"` - BlockSyncInterval *metav1.Duration `json:"blockSyncInterval,omitempty"` - GC *BlockGCOptions `json:"gc,omitempty"` -} - -type BlockGCOptions struct { - Enable *bool `json:"enable,omitempty"` - Resources corev1.ResourceRequirements `json:"resources,omitempty"` - Image string `json:"image,omitempty"` - ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` - GCInterval *metav1.Duration `json:"gcInterval,omitempty"` - CleanupTimeout *metav1.Duration `json:"cleanupTimeout,omitempty"` -} - -func NewStorageOptions() *StorageOptions { - enable := true - blockSyncInterval := metav1.Duration{Duration: time.Minute} 
- return &StorageOptions{ - BlockManager: &BlockManagerOptions{ - Enable: &enable, - CommonOptions: NewCommonOptions(), - BlockSyncInterval: &blockSyncInterval, - GC: &BlockGCOptions{ - Enable: &enable, - Image: DefaultBlockManagerImage, - }, - ServiceAccountName: DefaultServiceAccount, - }, - } -} - -func (o *StorageOptions) ApplyTo(options *StorageOptions) { - if o.BlockManager != nil { - if options.BlockManager == nil { - options.BlockManager = o.BlockManager - } else { - o.BlockManager.CommonOptions.ApplyTo(&options.BlockManager.CommonOptions) - - if options.BlockManager.Enable == nil { - options.BlockManager.Enable = o.BlockManager.Enable - } - - if options.BlockManager.BlockSyncInterval == nil || options.BlockManager.BlockSyncInterval.Duration == 0 { - options.BlockManager.BlockSyncInterval = o.BlockManager.BlockSyncInterval - } - if options.BlockManager.ServiceAccountName == "" { - options.BlockManager.ServiceAccountName = o.BlockManager.ServiceAccountName - } - - if o.BlockManager.GC != nil { - if options.BlockManager.GC == nil { - options.BlockManager.GC = o.BlockManager.GC - } else { - if options.BlockManager.GC.Image == "" { - options.BlockManager.GC.Image = o.BlockManager.GC.Image - } - - if options.BlockManager.GC.ImagePullPolicy == "" { - options.BlockManager.GC.ImagePullPolicy = o.BlockManager.GC.ImagePullPolicy - } - - if options.BlockManager.GC.Resources.Limits == nil { - options.BlockManager.GC.Resources.Limits = o.BlockManager.GC.Resources.Limits - } - - if options.BlockManager.GC.Resources.Requests == nil { - options.BlockManager.GC.Resources.Requests = o.BlockManager.GC.Resources.Requests - } - - if options.BlockManager.GC.GCInterval == nil || - options.BlockManager.GC.GCInterval.Duration == 0 { - options.BlockManager.GC.GCInterval = o.BlockManager.GC.GCInterval - } - - if options.BlockManager.GC.CleanupTimeout == nil || - options.BlockManager.GC.CleanupTimeout.Duration == 0 { - options.BlockManager.GC.CleanupTimeout = 
o.BlockManager.GC.CleanupTimeout - } - - if options.BlockManager.GC.Enable == nil { - options.BlockManager.GC.Enable = o.BlockManager.GC.Enable - } - } - } - } - } -} - -func (o *StorageOptions) Override(spec *v1alpha1.StorageSpec) { - o.BlockManager.CommonOptions.Override(&spec.BlockManager.CommonSpec) - - if spec.BlockManager.BlockSyncInterval == nil || spec.BlockManager.BlockSyncInterval.Duration == 0 { - spec.BlockManager.BlockSyncInterval = o.BlockManager.BlockSyncInterval - } - - if spec.BlockManager.ServiceAccountName == "" { - spec.BlockManager.ServiceAccountName = o.BlockManager.ServiceAccountName - } - - if spec.BlockManager.GC != nil && - spec.BlockManager.GC.Enable != nil && - *spec.BlockManager.GC.Enable { - if spec.BlockManager.GC.Image == "" { - spec.BlockManager.GC.Image = o.BlockManager.GC.Image - } - if spec.BlockManager.GC.ImagePullPolicy == "" { - spec.BlockManager.GC.ImagePullPolicy = o.BlockManager.GC.ImagePullPolicy - } - if spec.BlockManager.GC.Resources.Limits == nil { - spec.BlockManager.GC.Resources.Limits = o.BlockManager.GC.Resources.Limits - } - if spec.BlockManager.GC.Resources.Requests == nil { - spec.BlockManager.GC.Resources.Requests = o.BlockManager.GC.Resources.Requests - } - if spec.BlockManager.GC.GCInterval == nil || - spec.BlockManager.GC.GCInterval.Duration == 0 { - spec.BlockManager.GC.GCInterval = o.BlockManager.GC.GCInterval - } - if spec.BlockManager.GC.CleanupTimeout == nil || - spec.BlockManager.GC.GCInterval.Duration == 0 { - spec.BlockManager.GC.CleanupTimeout = o.BlockManager.GC.CleanupTimeout - } - } -} - -func (o *StorageOptions) Validate() []error { - var errs []error - if o.BlockManager != nil { - errs = append(errs, o.BlockManager.CommonOptions.Validate()...) 
- } - - return errs -} - -func (o *StorageOptions) AddFlags(fs *pflag.FlagSet, s *StorageOptions) { - if o.BlockManager != nil && s.BlockManager != nil { - o.BlockManager.CommonOptions.AddFlags(fs, &s.BlockManager.CommonOptions, "storage") - } -} diff --git a/pkg/controllers/monitoring/options/options.go b/pkg/controllers/monitoring/options/options.go deleted file mode 100644 index 26ff5bda..00000000 --- a/pkg/controllers/monitoring/options/options.go +++ /dev/null @@ -1,100 +0,0 @@ -package options - -import ( - "time" - - "github.com/prometheus/common/version" - "github.com/spf13/pflag" -) - -const ( - DefaultWhizardImage = "thanosio/thanos:v0.30.1" - DefaultEnvoyImage = "envoyproxy/envoy:v1.20.2" - DefaultRulerWriteProxyImage = "kubesphere/cortex-tenant:v1.7.2" - DefaultPrometheusConfigReloaderImage = "quay.io/prometheus-operator/prometheus-config-reloader:v0.55.1" - DefaultTSDBCleanupImage = "bash:5.1.16" - - DefaultIngesterRetentionPeriod = time.Hour * 3 - DefaultTenantsPerIngester = 3 - DefaultTenantsPerCompactor = 10 - - DefaultRouterReplicationFactor uint64 = 1 - DefaultRulerShards int32 = 1 - DefaultRulerEvaluationInterval string = "30s" - DefaultStoreMinReplicas int32 = 2 - DefaultStoreMaxReplicas int32 = 20 - - DefaultServiceAccount = "whizard-controller-manager" -) - -var ( - DefaultGatewayImage = "kubesphere/whizard-monitoring-gateway:" + version.Version - DefaultBlockManagerImage = "kubesphere/whizard-monitoring-block-manager:" + version.Version -) - -type Options struct { - Compactor *CompactorOptions `json:"compactor,omitempty" yaml:"compactor,omitempty" mapstructure:"compactor"` - Gateway *GatewayOptions `json:"gateway,omitempty" yaml:"gateway,omitempty" mapstructure:"gateway"` - Ingester *IngesterOptions `json:"ingester,omitempty" yaml:"ingester,omitempty" mapstructure:"ingester"` - Query *QueryOptions `json:"query,omitempty" yaml:"query,omitempty" mapstructure:"query"` - QueryFrontend *QueryFrontendOptions `json:"queryFrontend,omitempty" 
yaml:"queryFrontend,omitempty" mapstructure:"queryFrontend"` - Router *RouterOptions `json:"router,omitempty" yaml:"router,omitempty" mapstructure:"router"` - Ruler *RulerOptions `json:"ruler,omitempty" yaml:"ruler,omitempty" mapstructure:"ruler"` - Store *StoreOptions `json:"store,omitempty" yaml:"store,omitempty" mapstructure:"store"` - Storage *StorageOptions `json:"storage,omitempty" yaml:"storage,omitempty" mapstructure:"storage"` -} - -func NewOptions() *Options { - return &Options{ - - Compactor: NewCompactorOptions(), - Gateway: NewGatewayOptions(), - Ingester: NewIngesterOptions(), - Query: NewQueryOptions(), - QueryFrontend: NewQueryFrontendOptions(), - Router: NewRouterOptions(), - Ruler: NewRulerOptions(), - Store: NewStoreOptions(), - Storage: NewStorageOptions(), - } -} - -func (o *Options) Validate() []error { - var errs []error - - errs = append(errs, o.Compactor.Validate()...) - errs = append(errs, o.Gateway.Validate()...) - errs = append(errs, o.Ingester.Validate()...) - errs = append(errs, o.Query.Validate()...) - errs = append(errs, o.QueryFrontend.Validate()...) - errs = append(errs, o.Router.Validate()...) - errs = append(errs, o.Ruler.Validate()...) - errs = append(errs, o.Store.Validate()...) - errs = append(errs, o.Storage.Validate()...) 
- return errs -} - -func (o *Options) ApplyTo(options *Options) { - - o.Compactor.ApplyTo(options.Compactor) - o.Gateway.ApplyTo(options.Gateway) - o.Ingester.ApplyTo(options.Ingester) - o.Query.ApplyTo(options.Query) - o.QueryFrontend.ApplyTo(options.QueryFrontend) - o.Router.ApplyTo(options.Router) - o.Ruler.ApplyTo(options.Ruler) - o.Store.ApplyTo(options.Store) - o.Storage.ApplyTo(options.Storage) -} - -func (o *Options) AddFlags(fs *pflag.FlagSet, c *Options) { - o.Compactor.AddFlags(fs, o.Compactor) - o.Gateway.AddFlags(fs, o.Gateway) - o.Ingester.AddFlags(fs, o.Ingester) - o.Query.AddFlags(fs, o.Query) - o.QueryFrontend.AddFlags(fs, o.QueryFrontend) - o.Router.AddFlags(fs, o.Router) - o.Ruler.AddFlags(fs, o.Ruler) - o.Store.AddFlags(fs, c.Store) - o.Storage.AddFlags(fs, c.Storage) -} diff --git a/pkg/controllers/monitoring/query_controller.go b/pkg/controllers/monitoring/query_controller.go index 302574d6..1b037f4b 100644 --- a/pkg/controllers/monitoring/query_controller.go +++ b/pkg/controllers/monitoring/query_controller.go @@ -19,9 +19,9 @@ package monitoring import ( "context" + "github.com/imdario/mergo" monitoringv1alpha1 "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/constants" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources/query" "github.com/kubesphere/whizard/pkg/util" @@ -31,6 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -43,7 +44,6 @@ type QueryReconciler struct { client.Client Scheme *runtime.Scheme Context context.Context - Options *options.Options } 
//+kubebuilder:rbac:groups=monitoring.whizard.io,resources=queries,verbs=get;list;watch;create;update;patch;delete @@ -53,9 +53,8 @@ type QueryReconciler struct { //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=ingesters,verbs=get;list;watch //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=stores,verbs=get;list;watch //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=rulers,verbs=get;list;watch -//+kubebuilder:rbac:groups=core,resources=services;configmaps;serviceaccounts,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=apps,resources=deployments;statefulsets,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles;rolebindings,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=services;configmaps;secrets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
@@ -84,7 +83,15 @@ func (r *QueryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl return ctrl.Result{}, nil } - instance = r.validator(instance) + service := &monitoringv1alpha1.Service{} + if err := r.Get(ctx, *util.ServiceNamespacedName(&instance.ObjectMeta), service); err != nil { + return ctrl.Result{}, err + } + + if _, err := r.applyConfigurationFromQueryTemplateSpec(instance, resources.ApplyDefaults(service).Spec.QueryTemplateSpec); err != nil { + return ctrl.Result{}, err + } + queryReconciler, err := query.New( resources.BaseReconciler{ Client: r.Client, @@ -93,7 +100,6 @@ func (r *QueryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl Context: ctx, }, instance, - r.Options, ) if err != nil { return ctrl.Result{}, err @@ -142,7 +148,10 @@ func (r *QueryReconciler) mapFuncBySelectorFunc(fn func(metav1.Object) map[strin } } -func (r *QueryReconciler) validator(q *monitoringv1alpha1.Query) *monitoringv1alpha1.Query { - r.Options.Query.Override(&q.Spec) - return q +func (r *QueryReconciler) applyConfigurationFromQueryTemplateSpec(query *monitoringv1alpha1.Query, queryTemplateSpec monitoringv1alpha1.QuerySpec) (*monitoringv1alpha1.Query, error) { + + klog.V(9).Infof("applyConfigurationFromQueryTemplateSpec: \nqueryTemplateSpec: %v \nquerySpec: %v", queryTemplateSpec, query.Spec) + err := mergo.Merge(&query.Spec, queryTemplateSpec) + klog.V(9).Infof("query.Spec: %v", query.Spec) + return query, err } diff --git a/pkg/controllers/monitoring/query_frontend_controller.go b/pkg/controllers/monitoring/query_frontend_controller.go index 565151f2..b15a846d 100644 --- a/pkg/controllers/monitoring/query_frontend_controller.go +++ b/pkg/controllers/monitoring/query_frontend_controller.go @@ -19,9 +19,9 @@ package monitoring import ( "context" + "github.com/imdario/mergo" monitoringv1alpha1 "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/constants" - 
"github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources/queryfrontend" "github.com/kubesphere/whizard/pkg/util" @@ -43,7 +43,6 @@ type QueryFrontendReconciler struct { client.Client Scheme *runtime.Scheme Context context.Context - Options *options.QueryFrontendOptions } //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=queryfrontends,verbs=get;list;watch;create;update;patch;delete @@ -52,9 +51,8 @@ type QueryFrontendReconciler struct { //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=services,verbs=get;list;watch //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=queries,verbs=get;list;watch //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=tenants,verbs=get;list;watch -//+kubebuilder:rbac:groups=core,resources=services;configmaps;serviceaccounts,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=apps,resources=deployments;statefulsets,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles;rolebindings,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=services;configmaps;secrets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
@@ -83,7 +81,15 @@ func (r *QueryFrontendReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - instance = r.Validator(instance) + service := &monitoringv1alpha1.Service{} + if err := r.Get(ctx, *util.ServiceNamespacedName(&instance.ObjectMeta), service); err != nil { + return ctrl.Result{}, err + } + + if _, err := r.applyConfigurationFromQueryFrontendTemplateSpec(instance, resources.ApplyDefaults(service).Spec.QueryFrontendTemplateSpec); err != nil { + return ctrl.Result{}, err + } + queryFrontendReconciler, err := queryfrontend.New( resources.BaseReconciler{ Client: r.Client, @@ -138,8 +144,9 @@ func (r *QueryFrontendReconciler) mapFuncBySelectorFunc(fn func(metav1.Object) m } } -func (r *QueryFrontendReconciler) Validator(q *monitoringv1alpha1.QueryFrontend) *monitoringv1alpha1.QueryFrontend { - r.Options.Override(&q.Spec) - return q +func (r *QueryFrontendReconciler) applyConfigurationFromQueryFrontendTemplateSpec(queryFrontend *monitoringv1alpha1.QueryFrontend, queryFrontendTemplateSpec monitoringv1alpha1.QueryFrontendSpec) (*monitoringv1alpha1.QueryFrontend, error) { + + err := mergo.Merge(&queryFrontend.Spec, queryFrontendTemplateSpec) + return queryFrontend, err } diff --git a/pkg/controllers/monitoring/resources/compactor/compactor.go b/pkg/controllers/monitoring/resources/compactor/compactor.go index 3c6b1cd6..fd096e9e 100644 --- a/pkg/controllers/monitoring/resources/compactor/compactor.go +++ b/pkg/controllers/monitoring/resources/compactor/compactor.go @@ -3,7 +3,6 @@ package compactor import ( "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/constants" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" @@ -12,17 +11,15 @@ import ( type Compactor struct { resources.BaseReconciler compactor *v1alpha1.Compactor - option 
*options.CompactorOptions } -func New(reconciler resources.BaseReconciler, compactor *v1alpha1.Compactor, o *options.CompactorOptions) (*Compactor, error) { +func New(reconciler resources.BaseReconciler, compactor *v1alpha1.Compactor) (*Compactor, error) { if err := reconciler.SetService(compactor); err != nil { return nil, err } return &Compactor{ BaseReconciler: reconciler, compactor: compactor, - option: o, }, nil } diff --git a/pkg/controllers/monitoring/resources/compactor/statefulset.go b/pkg/controllers/monitoring/resources/compactor/statefulset.go index 4730cc9a..28b31c08 100644 --- a/pkg/controllers/monitoring/resources/compactor/statefulset.go +++ b/pkg/controllers/monitoring/resources/compactor/statefulset.go @@ -221,12 +221,7 @@ func (r *Compactor) megerArgs() ([]string, error) { defaultArgs = append(defaultArgs, "--downsampling.disable") } - var retention *v1alpha1.Retention - if r.Service.Spec.Retention != nil { - retention = r.Service.Spec.Retention - } else if r.option.Retention != nil { - retention = r.option.Retention - } + retention := r.compactor.Spec.Retention if retention != nil { if retention.RetentionRaw != "" { diff --git a/pkg/controllers/monitoring/resources/gateway/deployment.go b/pkg/controllers/monitoring/resources/gateway/deployment.go index 03a4c820..db92c3b5 100644 --- a/pkg/controllers/monitoring/resources/gateway/deployment.go +++ b/pkg/controllers/monitoring/resources/gateway/deployment.go @@ -432,7 +432,7 @@ func (g *Gateway) queryAddress() (string, error) { } q := queryList.Items[0] - r, err := query.New(g.BaseReconciler, &q, nil) + r, err := query.New(g.BaseReconciler, &q) if err != nil { return "", err } @@ -458,7 +458,7 @@ func (g *Gateway) remoteWriteAddress() (string, error) { } o := routerList.Items[0] - r, err := router.New(g.BaseReconciler, &o, nil) + r, err := router.New(g.BaseReconciler, &o) if err != nil { return "", err } diff --git a/pkg/controllers/monitoring/resources/ingester/ingester.go 
b/pkg/controllers/monitoring/resources/ingester/ingester.go index ffeaec08..665c6256 100644 --- a/pkg/controllers/monitoring/resources/ingester/ingester.go +++ b/pkg/controllers/monitoring/resources/ingester/ingester.go @@ -5,7 +5,6 @@ import ( monitoringv1alpha1 "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/constants" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -13,17 +12,15 @@ import ( type Ingester struct { resources.BaseReconciler ingester *monitoringv1alpha1.Ingester - options *options.IngesterOptions } -func New(reconciler resources.BaseReconciler, ingester *monitoringv1alpha1.Ingester, options *options.IngesterOptions) (*Ingester, error) { +func New(reconciler resources.BaseReconciler, ingester *monitoringv1alpha1.Ingester) (*Ingester, error) { if err := reconciler.SetService(ingester); err != nil { return nil, err } return &Ingester{ BaseReconciler: reconciler, ingester: ingester, - options: options, }, nil } diff --git a/pkg/controllers/monitoring/resources/ingester/statefulset.go b/pkg/controllers/monitoring/resources/ingester/statefulset.go index 0bfa7217..c45472ba 100644 --- a/pkg/controllers/monitoring/resources/ingester/statefulset.go +++ b/pkg/controllers/monitoring/resources/ingester/statefulset.go @@ -230,8 +230,9 @@ echo [$(date "+%Y-%m-%d %H:%M:%S")] cleanup block end ` func (r *Ingester) generateInitContainer(tsdbVolumeMount *corev1.VolumeMount) []corev1.Container { + // The tsdbVolumeMount is nil means ingester uses empty dir as the storage of TSDB, no need to cleanup. 
- if (r.options.DisableTSDBCleanup != nil && *r.options.DisableTSDBCleanup) || + if (r.Service.Spec.IngesterTemplateSpec.DisableTSDBCleanup != nil && *r.Service.Spec.IngesterTemplateSpec.DisableTSDBCleanup) || tsdbVolumeMount == nil { return nil } @@ -250,7 +251,7 @@ func (r *Ingester) generateInitContainer(tsdbVolumeMount *corev1.VolumeMount) [] return []corev1.Container{ { Name: initContainerName, - Image: r.options.TSDBCleanupImage, + Image: r.ingester.Spec.IngesterTSDBCleanUp.Image, Command: []string{ "bash", "-c", @@ -260,9 +261,11 @@ func (r *Ingester) generateInitContainer(tsdbVolumeMount *corev1.VolumeMount) [] constants.StorageDir, strings.Join(append(tenants, r.Service.Spec.DefaultTenantId), ","), }, + Resources: r.ingester.Spec.IngesterTSDBCleanUp.Resources, VolumeMounts: []corev1.VolumeMount{*tsdbVolumeMount}, }, } + } func getTSDBVolumeMount(container corev1.Container) *corev1.VolumeMount { diff --git a/pkg/controllers/monitoring/resources/query/deployment.go b/pkg/controllers/monitoring/resources/query/deployment.go index 0bc04dcd..de9547c2 100644 --- a/pkg/controllers/monitoring/resources/query/deployment.go +++ b/pkg/controllers/monitoring/resources/query/deployment.go @@ -181,8 +181,7 @@ func (q *Query) deployment() (runtime.Object, resources.Operation, error) { return nil, resources.OperationCreateOrUpdate, err } for _, item := range ingesterList.Items { - q.Options.Ingester.Override(&item.Spec) - ingesterInstance, err := ingester.New(q.BaseReconciler, &item, q.Options.Ingester) + ingesterInstance, err := ingester.New(q.BaseReconciler, &item) if err != nil { return nil, "", err } @@ -199,7 +198,6 @@ func (q *Query) deployment() (runtime.Object, resources.Operation, error) { return nil, resources.OperationCreateOrUpdate, err } for _, item := range storeList.Items { - q.Options.Store.Override(&item.Spec) timeRanges := item.Spec.TimeRanges if len(timeRanges) == 0 { timeRanges = append(timeRanges, v1alpha1.TimeRange{ diff --git 
a/pkg/controllers/monitoring/resources/query/query.go b/pkg/controllers/monitoring/resources/query/query.go index c7eb15e0..765d2ee7 100644 --- a/pkg/controllers/monitoring/resources/query/query.go +++ b/pkg/controllers/monitoring/resources/query/query.go @@ -8,7 +8,6 @@ import ( "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/constants" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -20,18 +19,16 @@ const ( type Query struct { resources.BaseReconciler - query *v1alpha1.Query - Options *options.Options + query *v1alpha1.Query } -func New(reconciler resources.BaseReconciler, q *v1alpha1.Query, o *options.Options) (*Query, error) { +func New(reconciler resources.BaseReconciler, q *v1alpha1.Query) (*Query, error) { if err := reconciler.SetService(q); err != nil { return nil, err } return &Query{ BaseReconciler: reconciler, query: q, - Options: o, }, nil } diff --git a/pkg/controllers/monitoring/resources/queryfrontend/deployment.go b/pkg/controllers/monitoring/resources/queryfrontend/deployment.go index 9e833dce..30fc145c 100644 --- a/pkg/controllers/monitoring/resources/queryfrontend/deployment.go +++ b/pkg/controllers/monitoring/resources/queryfrontend/deployment.go @@ -236,7 +236,7 @@ func (q *QueryFrontend) queryAddress() (string, error) { } o := queryList.Items[0] - r, err := query.New(q.BaseReconciler, &o, nil) + r, err := query.New(q.BaseReconciler, &o) if err != nil { return "", err } diff --git a/pkg/controllers/monitoring/resources/router/configmap.go b/pkg/controllers/monitoring/resources/router/configmap.go index 057e581b..e2ea6de3 100644 --- a/pkg/controllers/monitoring/resources/router/configmap.go +++ b/pkg/controllers/monitoring/resources/router/configmap.go @@ -44,8 +44,8 @@ func (r *Router) hashringsConfigMap() (runtime.Object, resources.Operation, erro } for _, 
item := range ingesterList.Items { - r.Options.Ingester.Override(&item.Spec) - ingester, err := ingester.New(r.BaseReconciler, &item, r.Options.Ingester) + + ingester, err := ingester.New(r.BaseReconciler, &item) if err != nil { return nil, "", err } diff --git a/pkg/controllers/monitoring/resources/router/router.go b/pkg/controllers/monitoring/resources/router/router.go index 427236db..80828703 100644 --- a/pkg/controllers/monitoring/resources/router/router.go +++ b/pkg/controllers/monitoring/resources/router/router.go @@ -5,7 +5,6 @@ import ( "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/constants" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -18,18 +17,16 @@ const ( type Router struct { resources.BaseReconciler - router *v1alpha1.Router - Options *options.Options + router *v1alpha1.Router } -func New(reconciler resources.BaseReconciler, r *v1alpha1.Router, o *options.Options) (*Router, error) { +func New(reconciler resources.BaseReconciler, r *v1alpha1.Router) (*Router, error) { if err := reconciler.SetService(r); err != nil { return nil, err } return &Router{ BaseReconciler: reconciler, router: r, - Options: o, }, nil } diff --git a/pkg/controllers/monitoring/resources/ruler/ruler.go b/pkg/controllers/monitoring/resources/ruler/ruler.go index b7c6bde1..7a3ff148 100644 --- a/pkg/controllers/monitoring/resources/ruler/ruler.go +++ b/pkg/controllers/monitoring/resources/ruler/ruler.go @@ -7,7 +7,6 @@ import ( monitoringv1alpha1 "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/constants" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" @@ -26,20 +25,18 @@ var ( type Ruler 
struct { resources.BaseReconciler - ruler *monitoringv1alpha1.Ruler - Options *options.RulerOptions + ruler *monitoringv1alpha1.Ruler + shardsRuleConfigMapNames []map[string]struct{} // rule configmaps for each shard } -func New(reconciler resources.BaseReconciler, ruler *monitoringv1alpha1.Ruler, - opt *options.RulerOptions) (*Ruler, error) { +func New(reconciler resources.BaseReconciler, ruler *monitoringv1alpha1.Ruler) (*Ruler, error) { if err := reconciler.SetService(ruler); err != nil { return nil, err } return &Ruler{ BaseReconciler: reconciler, ruler: ruler, - Options: opt, }, nil } diff --git a/pkg/controllers/monitoring/resources/ruler/statefulset.go b/pkg/controllers/monitoring/resources/ruler/statefulset.go index 6e71586f..5b79e423 100644 --- a/pkg/controllers/monitoring/resources/ruler/statefulset.go +++ b/pkg/controllers/monitoring/resources/ruler/statefulset.go @@ -495,9 +495,9 @@ func (r *Ruler) statefulSet(shardSn int) (runtime.Object, resources.Operation, e sort.Strings(container.Args[1:]) - defautReloaderConfig := promoperator.DefaultConfig(r.Options.PrometheusConfigReloader.Resources.Limits.Cpu().String(), r.Options.PrometheusConfigReloader.Resources.Limits.Memory().String()) + defautReloaderConfig := promoperator.DefaultConfig(r.ruler.Spec.PrometheusConfigReloader.Resources.Limits.Cpu().String(), r.ruler.Spec.PrometheusConfigReloader.Resources.Limits.Memory().String()) var reloaderConfig = promoperator.ContainerConfig{ - Image: r.Options.PrometheusConfigReloader.Image, + Image: r.ruler.Spec.PrometheusConfigReloader.Image, CPURequests: defautReloaderConfig.ReloaderConfig.CPURequests, MemoryRequests: defautReloaderConfig.ReloaderConfig.MemoryRequests, CPULimits: defautReloaderConfig.ReloaderConfig.CPULimits, @@ -550,7 +550,7 @@ func (r *Ruler) remoteWriteAddress() (string, error) { } o := routerList.Items[0] - r, err := router.New(r.BaseReconciler, &o, nil) + r, err := router.New(r.BaseReconciler, &o) if err != nil { return "", err } @@ -601,7 
+601,7 @@ func (r *Ruler) queryAddress() (string, error) { } o := queryList.Items[0] - r, err := query.New(r.BaseReconciler, &o, nil) + r, err := query.New(r.BaseReconciler, &o) if err != nil { return "", err } @@ -630,11 +630,11 @@ func (r *Ruler) addQueryProxyContainer(serviceSpec *monitoringv1alpha1.ServiceSp queryProxyContainer := &corev1.Container{ Name: "query-proxy", - Image: r.Options.RulerQueryProxy.Image, + Image: r.ruler.Spec.RulerQueryProxy.Image, Args: []string{ "--http-address=127.0.0.1:9080", }, - Resources: r.Options.RulerQueryProxy.Resources, + Resources: r.ruler.Spec.RulerQueryProxy.Resources, } queryProxyContainer.Args = append(queryProxyContainer.Args, "--tenant.label-name="+serviceSpec.TenantLabelName) queryProxyContainer.Args = append(queryProxyContainer.Args, "--tenant.header="+serviceSpec.TenantHeader) @@ -746,11 +746,11 @@ func (r *Ruler) addWriteProxyContainer(serviceSpec *monitoringv1alpha1.ServiceSp writeProxyContainer = &corev1.Container{ Name: "write-proxy", - Image: r.Options.RulerWriteProxy.Image, + Image: r.ruler.Spec.RulerWriteProxy.Image, Args: []string{ "--config-content=" + string(cfgContent), }, - Resources: r.Options.RulerWriteProxy.Resources, + Resources: r.ruler.Spec.RulerWriteProxy.Resources, } return writeProxyContainer, nil } diff --git a/pkg/controllers/monitoring/resources/service.go b/pkg/controllers/monitoring/resources/service.go new file mode 100644 index 00000000..81c614ae --- /dev/null +++ b/pkg/controllers/monitoring/resources/service.go @@ -0,0 +1,146 @@ +package resources + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + monitoringv1alpha1 "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" + "github.com/kubesphere/whizard/pkg/constants" +) + +// Apply defaults to the service +func ApplyDefaults(service *monitoringv1alpha1.Service) *monitoringv1alpha1.Service { + var whizardDefaultReplicas int32 = 2 + var whizardCompactorReplicas int32 = 1 + var whizardRulerReplicas 
int32 = 1 + var whizardQueryReplicas int32 = 3 + var whizardRouterReplicationFactor uint64 = 1 + + if service.Spec.CompactorTemplateSpec.Image == "" { + service.Spec.CompactorTemplateSpec.Image = constants.DefaultWhizardBaseImage + } + if service.Spec.CompactorTemplateSpec.Replicas == nil { + service.Spec.CompactorTemplateSpec.Replicas = &whizardCompactorReplicas + } + if service.Spec.CompactorTemplateSpec.Resources.Size() == 0 { + service.Spec.CompactorTemplateSpec.Resources = constants.DefaultWhizardBaseResources + } + + if service.Spec.GatewayTemplateSpec.Image == "" { + service.Spec.GatewayTemplateSpec.Image = constants.DefaultWhizardMonitoringGatewayImage + } + if service.Spec.GatewayTemplateSpec.Replicas == nil { + service.Spec.GatewayTemplateSpec.Replicas = &whizardDefaultReplicas + } + if service.Spec.GatewayTemplateSpec.Resources.Size() == 0 { + service.Spec.GatewayTemplateSpec.Resources = constants.DefaultWhizardBaseResources + } + + if service.Spec.IngesterTemplateSpec.Image == "" { + service.Spec.IngesterTemplateSpec.Image = constants.DefaultWhizardBaseImage + } + if service.Spec.IngesterTemplateSpec.Replicas == nil { + service.Spec.IngesterTemplateSpec.Replicas = &whizardDefaultReplicas + } + if service.Spec.IngesterTemplateSpec.Resources.Size() == 0 { + service.Spec.IngesterTemplateSpec.Resources = constants.DefaultWhizardLargeResource + } + if service.Spec.IngesterTemplateSpec.IngesterTSDBCleanUp.Image == "" { + service.Spec.IngesterTemplateSpec.IngesterTSDBCleanUp.Image = constants.DefaultIngesterTSDBCleanupImage + } + if service.Spec.IngesterTemplateSpec.IngesterTSDBCleanUp.Resources.Size() == 0 { + service.Spec.IngesterTemplateSpec.IngesterTSDBCleanUp.Resources = constants.DefaultWhizardBaseResources + } + if service.Spec.IngesterTemplateSpec.LocalTsdbRetention == "" { + service.Spec.IngesterTemplateSpec.LocalTsdbRetention = "7d" + } + + if service.Spec.QueryFrontendTemplateSpec.Image == "" { + service.Spec.QueryFrontendTemplateSpec.Image = 
constants.DefaultWhizardBaseImage + } + if service.Spec.QueryFrontendTemplateSpec.Replicas == nil { + service.Spec.QueryFrontendTemplateSpec.Replicas = &whizardDefaultReplicas + } + if service.Spec.QueryFrontendTemplateSpec.Resources.Size() == 0 { + service.Spec.QueryFrontendTemplateSpec.Resources = constants.DefaultWhizardBaseResources + } + + if service.Spec.QueryTemplateSpec.Image == "" { + service.Spec.QueryTemplateSpec.Image = constants.DefaultWhizardBaseImage + } + if service.Spec.QueryTemplateSpec.Replicas == nil { + service.Spec.QueryTemplateSpec.Replicas = &whizardQueryReplicas + } + if service.Spec.QueryTemplateSpec.Resources.Size() == 0 { + service.Spec.QueryTemplateSpec.Resources = constants.DefaultWhizardBaseResources + } + if service.Spec.QueryTemplateSpec.Envoy.Image == "" { + service.Spec.QueryTemplateSpec.Envoy.Image = constants.DefaultEnvoyImage + } + if service.Spec.QueryTemplateSpec.Envoy.Resources.Size() == 0 { + service.Spec.QueryTemplateSpec.Envoy.Resources = constants.DefaultWhizardBaseResources + } + + if service.Spec.RulerTemplateSpec.Image == "" { + service.Spec.RulerTemplateSpec.Image = constants.DefaultWhizardBaseImage + } + if service.Spec.RulerTemplateSpec.Replicas == nil { + service.Spec.RulerTemplateSpec.Replicas = &whizardRulerReplicas + } + if service.Spec.RulerTemplateSpec.Resources.Size() == 0 { + service.Spec.RulerTemplateSpec.Resources = constants.DefaultWhizardBaseResources + } + if service.Spec.RulerTemplateSpec.PrometheusConfigReloader.Image == "" { + service.Spec.RulerTemplateSpec.PrometheusConfigReloader.Image = constants.DefaultPrometheusConfigReloaderImage + } + if service.Spec.RulerTemplateSpec.PrometheusConfigReloader.Resources.Size() == 0 { + service.Spec.RulerTemplateSpec.PrometheusConfigReloader.Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("0.1Gi"), + }, + Limits: 
corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("0.1Gi"), + }, + } + } + if service.Spec.RulerTemplateSpec.RulerQueryProxy.Image == "" { + service.Spec.RulerTemplateSpec.RulerQueryProxy.Image = constants.DefaultWhizardMonitoringGatewayImage + } + if service.Spec.RulerTemplateSpec.RulerQueryProxy.Resources.Size() == 0 { + service.Spec.RulerTemplateSpec.RulerQueryProxy.Resources = constants.DefaultWhizardBaseResources + } + + if service.Spec.RulerTemplateSpec.RulerWriteProxy.Image == "" { + service.Spec.RulerTemplateSpec.RulerWriteProxy.Image = constants.DefaultRulerWriteProxyImage + } + if service.Spec.RulerTemplateSpec.RulerWriteProxy.Resources.Size() == 0 { + service.Spec.RulerTemplateSpec.RulerWriteProxy.Resources = constants.DefaultWhizardBaseResources + } + + if service.Spec.RouterTemplateSpec.Image == "" { + service.Spec.RouterTemplateSpec.Image = constants.DefaultWhizardBaseImage + } + if service.Spec.RouterTemplateSpec.Replicas == nil { + service.Spec.RouterTemplateSpec.Replicas = &whizardDefaultReplicas + } + if service.Spec.RouterTemplateSpec.Resources.Size() == 0 { + service.Spec.RouterTemplateSpec.Resources = constants.DefaultWhizardBaseResources + } + if service.Spec.RouterTemplateSpec.ReplicationFactor == nil { + service.Spec.RouterTemplateSpec.ReplicationFactor = &whizardRouterReplicationFactor + } + + if service.Spec.StoreTemplateSpec.Image == "" { + service.Spec.StoreTemplateSpec.Image = constants.DefaultWhizardBaseImage + } + if service.Spec.StoreTemplateSpec.Replicas == nil { + service.Spec.StoreTemplateSpec.Replicas = &whizardDefaultReplicas + } + if service.Spec.StoreTemplateSpec.Resources.Size() == 0 { + service.Spec.StoreTemplateSpec.Resources = constants.DefaultWhizardBaseResources + } + + return service +} diff --git a/pkg/controllers/monitoring/resources/store/store.go b/pkg/controllers/monitoring/resources/store/store.go index eafb7066..4d114f13 100644 --- 
a/pkg/controllers/monitoring/resources/store/store.go +++ b/pkg/controllers/monitoring/resources/store/store.go @@ -5,7 +5,6 @@ import ( "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/constants" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" @@ -14,14 +13,12 @@ import ( type Store struct { resources.BaseReconciler store *v1alpha1.Store - *options.StoreOptions } -func New(reconciler resources.BaseReconciler, instance *v1alpha1.Store, o *options.StoreOptions) *Store { +func New(reconciler resources.BaseReconciler, instance *v1alpha1.Store) *Store { return &Store{ BaseReconciler: reconciler, store: instance, - StoreOptions: o, } } diff --git a/pkg/controllers/monitoring/resources/tenant/compactor.go b/pkg/controllers/monitoring/resources/tenant/compactor.go index 998a9c2a..03fe3a2b 100644 --- a/pkg/controllers/monitoring/resources/tenant/compactor.go +++ b/pkg/controllers/monitoring/resources/tenant/compactor.go @@ -111,7 +111,7 @@ func (t *Tenant) compactor() error { needToCreate := true compactor := &monitoringv1alpha1.Compactor{} for _, item := range compactorList.Items { - if len(item.Spec.Tenants) < t.Options.Compactor.DefaultTenantsPerCompactor { + if len(item.Spec.Tenants) < t.Service.Spec.CompactorTemplateSpec.DefaultTenantsPerCompactor { compactor = &item compactor.Spec.Tenants = append(compactor.Spec.Tenants, t.tenant.Name) needToCreate = false diff --git a/pkg/controllers/monitoring/resources/tenant/ingestor.go b/pkg/controllers/monitoring/resources/tenant/ingestor.go index 90b23414..55b8b638 100644 --- a/pkg/controllers/monitoring/resources/tenant/ingestor.go +++ b/pkg/controllers/monitoring/resources/tenant/ingestor.go @@ -149,7 +149,7 @@ func (t *Tenant) ingester() error { for i := 0; i < len(ingesterMapping)+1; i++ { name := 
t.createIngesterInstanceName(strconv.Itoa(i)) if ingesterItem, ok := ingesterMapping[name]; ok { - if len(ingesterItem.Spec.Tenants) < t.Options.Ingester.DefaultTenantsPerIngester { + if len(ingesterItem.Spec.Tenants) < t.Service.Spec.IngesterTemplateSpec.DefaultTenantsPerIngester { ingester = ingesterItem addTenantToIngesterInstance(t.tenant, ingester) break @@ -251,17 +251,19 @@ func (t *Tenant) removeTenantFromIngesterbyName(namespace, name string) error { // When ingester uses object storage, the ingester retention period uses the DefaultIngesterRetentionPeriod. // When it uses local storage, its retention period is the same as LocalTsdbRetention if v, ok := ingester.Labels[constants.StorageLabelKey]; ok && v != constants.LocalStorage { - retentionPeriod = t.Options.Ingester.DefaultIngesterRetentionPeriod + period, _ := model.ParseDuration(string(t.Service.Spec.IngesterTemplateSpec.DefaultIngesterRetentionPeriod)) + retentionPeriod = time.Duration(period) } else { if ingester.Spec.LocalTsdbRetention != "" { period, _ := model.ParseDuration(ingester.Spec.LocalTsdbRetention) retentionPeriod = time.Duration(period) - } else if t.Options.Ingester.LocalTsdbRetention != "" { - period, _ := model.ParseDuration(ingester.Spec.LocalTsdbRetention) + } else if t.Service.Spec.IngesterTemplateSpec.LocalTsdbRetention != "" { + period, _ := model.ParseDuration(t.Service.Spec.IngesterTemplateSpec.LocalTsdbRetention) retentionPeriod = time.Duration(period) } if retentionPeriod <= 0 { - retentionPeriod = t.Options.Ingester.DefaultIngesterRetentionPeriod + period, _ := model.ParseDuration(string(t.Service.Spec.IngesterTemplateSpec.DefaultIngesterRetentionPeriod)) + retentionPeriod = time.Duration(period) } } diff --git a/pkg/controllers/monitoring/resources/tenant/ruler.go b/pkg/controllers/monitoring/resources/tenant/ruler.go index c4dc8e8c..e11dab8d 100644 --- a/pkg/controllers/monitoring/resources/tenant/ruler.go +++ b/pkg/controllers/monitoring/resources/tenant/ruler.go @@ -78,9 +78,10 @@ func (t *Tenant) createOrUpdateRulerinstance() 
*monitoringv1alpha1.Ruler { var ruleSelectors []*metav1.LabelSelector // add default rule selectors. (mainly used to select recording rules) - ruleSelectors = append(ruleSelectors, t.Options.Ruler.RuleSelectors...) - if t.Options.Ruler.DisableAlertingRulesAutoSelection == nil || - !*t.Options.Ruler.DisableAlertingRulesAutoSelection { + ruleSelectors = append(ruleSelectors, t.Service.Spec.RulerTemplateSpec.RuleSelectors...) + + if t.Service.Spec.RulerTemplateSpec.DisableAlertingRulesAutoSelection == nil || + !*t.Service.Spec.RulerTemplateSpec.DisableAlertingRulesAutoSelection { // select alerting rules associated with this tenant(cluster) ruleSelectors = append(ruleSelectors, &metav1.LabelSelector{ MatchLabels: map[string]string{ diff --git a/pkg/controllers/monitoring/resources/tenant/tenant.go b/pkg/controllers/monitoring/resources/tenant/tenant.go index 760d0670..ec75a7a3 100644 --- a/pkg/controllers/monitoring/resources/tenant/tenant.go +++ b/pkg/controllers/monitoring/resources/tenant/tenant.go @@ -2,25 +2,21 @@ package tenant import ( monitoringv1alpha1 "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" ) type Tenant struct { tenant *monitoringv1alpha1.Tenant resources.BaseReconciler - - Options *options.Options } -func New(reconciler resources.BaseReconciler, tenant *monitoringv1alpha1.Tenant, o *options.Options) (*Tenant, error) { +func New(reconciler resources.BaseReconciler, tenant *monitoringv1alpha1.Tenant) (*Tenant, error) { if err := reconciler.SetService(tenant); err != nil { return nil, err } return &Tenant{ tenant: tenant, BaseReconciler: reconciler, - Options: o, }, nil } diff --git a/pkg/controllers/monitoring/router_controller.go b/pkg/controllers/monitoring/router_controller.go index 1984bb21..e0459bf2 100644 --- a/pkg/controllers/monitoring/router_controller.go +++ 
b/pkg/controllers/monitoring/router_controller.go @@ -19,9 +19,9 @@ package monitoring import ( "context" + "github.com/imdario/mergo" monitoringv1alpha1 "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/constants" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources/router" "github.com/kubesphere/whizard/pkg/util" @@ -43,7 +43,6 @@ type RouterReconciler struct { client.Client Scheme *runtime.Scheme Context context.Context - Options *options.Options } //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=routers,verbs=get;list;watch;create;update;patch;delete @@ -51,9 +50,8 @@ type RouterReconciler struct { //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=routers/finalizers,verbs=update //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=services,verbs=get;list;watch //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=ingesters,verbs=get;list;watch -//+kubebuilder:rbac:groups=core,resources=services;configmaps;serviceaccounts,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=apps,resources=deployments;statefulsets,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles;rolebindings,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=services;configmaps;secrets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
@@ -82,7 +80,15 @@ func (r *RouterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{}, nil } - instance = r.validator(instance) + service := &monitoringv1alpha1.Service{} + if err := r.Get(ctx, *util.ServiceNamespacedName(&instance.ObjectMeta), service); err != nil { + return ctrl.Result{}, err + } + + if _, err := r.applyConfigurationFromRouterTemplateSpec(instance, resources.ApplyDefaults(service).Spec.RouterTemplateSpec); err != nil { + return ctrl.Result{}, err + } + routerReconciler, err := router.New( resources.BaseReconciler{ Client: r.Client, @@ -91,7 +97,6 @@ func (r *RouterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr Context: ctx, }, instance, - r.Options, ) if err != nil { return ctrl.Result{}, err @@ -136,8 +141,9 @@ func (r *RouterReconciler) mapFuncBySelectorFunc(fn func(metav1.Object) map[stri } } -func (r *RouterReconciler) validator(router *monitoringv1alpha1.Router) *monitoringv1alpha1.Router { - r.Options.Router.Override(&router.Spec) - return router +func (r *RouterReconciler) applyConfigurationFromRouterTemplateSpec(router *monitoringv1alpha1.Router, routerTemplateSpec monitoringv1alpha1.RouterSpec) (*monitoringv1alpha1.Router, error) { + + err := mergo.Merge(&router.Spec, routerTemplateSpec) + return router, err } diff --git a/pkg/controllers/monitoring/ruler_controller.go b/pkg/controllers/monitoring/ruler_controller.go index c2991376..13fdde22 100644 --- a/pkg/controllers/monitoring/ruler_controller.go +++ b/pkg/controllers/monitoring/ruler_controller.go @@ -19,9 +19,9 @@ package monitoring import ( "context" + "github.com/imdario/mergo" monitoringv1alpha1 "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/constants" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources/ruler" 
"github.com/kubesphere/whizard/pkg/util" @@ -42,8 +42,6 @@ import ( // RulerReconciler reconciles a Ruler object type RulerReconciler struct { - DefaulterValidator RulerDefaulterValidator - Option *options.RulerOptions client.Client Scheme *runtime.Scheme Context context.Context @@ -80,16 +78,20 @@ func (r *RulerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl return ctrl.Result{}, err } - instance, err = r.DefaulterValidator(instance) - if err != nil { - return ctrl.Result{}, err - } - if instance.Labels == nil || instance.Labels[constants.ServiceLabelKey] == "" { return ctrl.Result{}, nil } + service := &monitoringv1alpha1.Service{} + if err := r.Get(ctx, *util.ServiceNamespacedName(&instance.ObjectMeta), service); err != nil { + return ctrl.Result{}, err + } + + if _, err := r.applyConfigurationFromRulerTemplateSpec(instance, resources.ApplyDefaults(service).Spec.RulerTemplateSpec); err != nil { + return ctrl.Result{}, err + } + baseReconciler := resources.BaseReconciler{ Client: r.Client, Log: l, @@ -97,7 +99,7 @@ func (r *RulerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl Context: ctx, } - rulerReconcile, err := ruler.New(baseReconciler, instance, r.Option) + rulerReconcile, err := ruler.New(baseReconciler, instance) if err != nil { return ctrl.Result{}, err } @@ -211,18 +213,9 @@ func (r *RulerReconciler) mapToRulerFunc(ctx context.Context, o client.Object) [ return reqs } -type RulerDefaulterValidator func(ruler *monitoringv1alpha1.Ruler) (*monitoringv1alpha1.Ruler, error) - -func CreateRulerDefaulterValidator(opt *options.RulerOptions) RulerDefaulterValidator { - - return func(ruler *monitoringv1alpha1.Ruler) (*monitoringv1alpha1.Ruler, error) { +func (r *RulerReconciler) applyConfigurationFromRulerTemplateSpec(ruler *monitoringv1alpha1.Ruler, rulerTemplateSpec monitoringv1alpha1.RulerTemplateSpec) (*monitoringv1alpha1.Ruler, error) { - opt.Override(&ruler.Spec) + err := mergo.Merge(&ruler.Spec, 
rulerTemplateSpec.RulerSpec) - if ruler.Spec.Shards == nil || *ruler.Spec.Shards < 0 { - ruler.Spec.Shards = opt.Shards - } - - return ruler, nil - } + return ruler, err } diff --git a/pkg/controllers/monitoring/storage_controller.go b/pkg/controllers/monitoring/storage_controller.go index 02df49f2..2a517a5b 100644 --- a/pkg/controllers/monitoring/storage_controller.go +++ b/pkg/controllers/monitoring/storage_controller.go @@ -17,7 +17,6 @@ import ( "context" monitoringv1alpha1 "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources/storage" appsv1 "k8s.io/api/apps/v1" @@ -37,8 +36,6 @@ type StorageReconciler struct { client.Client Scheme *runtime.Scheme Context context.Context - - Options *options.StorageOptions } //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=storages,verbs=get;list;watch;create;update;patch;delete @@ -68,7 +65,6 @@ func (r *StorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct } return ctrl.Result{}, err } - instance = r.validate(instance) baseReconciler := resources.BaseReconciler{ Client: r.Client, @@ -125,12 +121,3 @@ func (r *StorageReconciler) mapToStoragebySecretRefFunc(ctx context.Context, o c return reqs } - -func (r *StorageReconciler) validate(storage *monitoringv1alpha1.Storage) *monitoringv1alpha1.Storage { - - if storage.Spec.BlockManager != nil && storage.Spec.BlockManager.Enable != nil && *(storage.Spec.BlockManager.Enable) { - r.Options.Override(&storage.Spec) - } - - return storage -} diff --git a/pkg/controllers/monitoring/store_controller.go b/pkg/controllers/monitoring/store_controller.go index 5264fdad..24a2bb20 100644 --- a/pkg/controllers/monitoring/store_controller.go +++ b/pkg/controllers/monitoring/store_controller.go @@ -19,15 +19,16 @@ package monitoring import ( "context" + 
"github.com/imdario/mergo" monitoringv1alpha1 "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/constants" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources/store" "github.com/kubesphere/whizard/pkg/util" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" @@ -39,17 +40,15 @@ import ( // StoreReconciler reconciles a Store object type StoreReconciler struct { - DefaulterValidator StoreDefaulterValidator client.Client Scheme *runtime.Scheme Context context.Context - - Options *options.StoreOptions } //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=stores,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=stores/status,verbs=get;update;patch //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=stores/finalizers,verbs=update +//+kubebuilder:rbac:groups=monitoring.whizard.io,resources=services,verbs=get;list;watch //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=storages,verbs=get;list;watch //+kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete @@ -77,17 +76,21 @@ func (r *StoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl return ctrl.Result{}, err } - instance, err = r.DefaulterValidator(instance) - if err != nil { - return ctrl.Result{}, err - } - if instance.Labels == nil || instance.Labels[constants.ServiceLabelKey] == "" || instance.Labels[constants.StorageLabelKey] == "" { return ctrl.Result{}, nil } + service := 
&monitoringv1alpha1.Service{} + if err := r.Get(ctx, *util.ServiceNamespacedName(&instance.ObjectMeta), service); err != nil { + return ctrl.Result{}, err + } + + if _, err := r.applyConfigurationFromStoreTemplateSpec(instance, resources.ApplyDefaults(service).Spec.StoreTemplateSpec); err != nil { + return ctrl.Result{}, err + } + baseReconciler := resources.BaseReconciler{ Client: r.Client, Log: l, @@ -95,7 +98,7 @@ func (r *StoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl Context: ctx, } - if err := store.New(baseReconciler, instance, r.Options).Reconcile(); err != nil { + if err := store.New(baseReconciler, instance).Reconcile(); err != nil { return ctrl.Result{}, err } @@ -106,42 +109,41 @@ func (r *StoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl func (r *StoreReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&monitoringv1alpha1.Store{}). + Watches(&monitoringv1alpha1.Service{}, + handler.EnqueueRequestsFromMapFunc(r.mapFuncBySelectorFunc(util.ManagedLabelByService))). Watches(&monitoringv1alpha1.Storage{}, - handler.EnqueueRequestsFromMapFunc(r.reconcileRequestFromStorage)). + handler.EnqueueRequestsFromMapFunc(r.mapFuncBySelectorFunc(util.ManagedLabelByStorage))). Owns(&appsv1.StatefulSet{}). Owns(&corev1.Service{}). // Owns(&autoscalingv2beta2.HorizontalPodAutoscaler{}). 
Complete(r) } -func (r *StoreReconciler) reconcileRequestFromStorage(ctx context.Context, o client.Object) []reconcile.Request { - storeList := &monitoringv1alpha1.StoreList{} - if err := r.Client.List(r.Context, storeList, client.MatchingLabels(util.ManagedLabelByStorage(o))); err != nil { - log.FromContext(r.Context).WithValues("storeList", "").Error(err, "") - return nil - } +func (r *StoreReconciler) mapFuncBySelectorFunc(fn func(metav1.Object) map[string]string) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + storeList := &monitoringv1alpha1.StoreList{} + if err := r.Client.List(r.Context, storeList, client.MatchingLabels(fn(o))); err != nil { + log.FromContext(r.Context).WithValues("storeList", "").Error(err, "") + return nil + } - var reqs []reconcile.Request - for _, item := range storeList.Items { - reqs = append(reqs, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: item.Namespace, - Name: item.Name, - }, - }) - } + var reqs []reconcile.Request + for _, item := range storeList.Items { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: item.Namespace, + Name: item.Name, + }, + }) + } - return reqs + return reqs + } } -type StoreDefaulterValidator func(store *monitoringv1alpha1.Store) (*monitoringv1alpha1.Store, error) - -func CreateStoreDefaulterValidator(opt *options.StoreOptions) StoreDefaulterValidator { +func (r *StoreReconciler) applyConfigurationFromStoreTemplateSpec(store *monitoringv1alpha1.Store, storeTemplateSpec monitoringv1alpha1.StoreSpec) (*monitoringv1alpha1.Store, error) { - return func(store *monitoringv1alpha1.Store) (*monitoringv1alpha1.Store, error) { + err := mergo.Merge(&store.Spec, storeTemplateSpec) - opt.Override(&store.Spec) - - return store, nil - } + return store, err } diff --git a/pkg/controllers/monitoring/tenant_controller.go b/pkg/controllers/monitoring/tenant_controller.go index 4a9cdd11..c6aa0e76 100644 --- 
a/pkg/controllers/monitoring/tenant_controller.go +++ b/pkg/controllers/monitoring/tenant_controller.go @@ -21,7 +21,6 @@ import ( monitoringv1alpha1 "github.com/kubesphere/whizard/pkg/api/monitoring/v1alpha1" "github.com/kubesphere/whizard/pkg/constants" - "github.com/kubesphere/whizard/pkg/controllers/monitoring/options" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources" "github.com/kubesphere/whizard/pkg/controllers/monitoring/resources/tenant" "github.com/kubesphere/whizard/pkg/util" @@ -37,12 +36,9 @@ import ( // TenantReconciler reconciles a Tenant object type TenantReconciler struct { - DefaulterValidator TenantDefaulterValidator client.Client Scheme *runtime.Scheme Context context.Context - - Options *options.Options } //+kubebuilder:rbac:groups=monitoring.whizard.io,resources=tenants,verbs=get;list;watch;create;update;patch;delete @@ -89,7 +85,7 @@ func (r *TenantReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr Scheme: r.Scheme, Context: ctx, } - t, err := tenant.New(baseReconciler, instance, r.Options) + t, err := tenant.New(baseReconciler, instance) if err != nil { return ctrl.Result{}, err } @@ -194,14 +190,6 @@ func (r *TenantReconciler) mapToTenantByStore(ctx context.Context, _ client.Obje return reqs } -type TenantDefaulterValidator func(tenant *monitoringv1alpha1.Tenant) (*monitoringv1alpha1.Tenant, error) - -func CreateTenantDefaulterValidator(_ options.Options) TenantDefaulterValidator { - return func(tenant *monitoringv1alpha1.Tenant) (*monitoringv1alpha1.Tenant, error) { - return tenant, nil - } -} - func (r *TenantReconciler) tenantValidator(tenant *monitoringv1alpha1.Tenant) *monitoringv1alpha1.Tenant { if tenant.Labels == nil { tenant.Labels = make(map[string]string)