diff --git a/.chloggen/1710-prometheus-cr-scrape-config-credentials.yaml b/.chloggen/1710-prometheus-cr-scrape-config-credentials.yaml new file mode 100644 index 0000000000..7231c5fcb7 --- /dev/null +++ b/.chloggen/1710-prometheus-cr-scrape-config-credentials.yaml @@ -0,0 +1,16 @@ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. operator, target allocator, github action) +component: target allocator + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Populate credentials for Prometheus CR (service and pod monitor) scrape configs. + +# One or more tracking issues related to the change +issues: [1669] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: \ No newline at end of file diff --git a/cmd/otel-allocator/README.md b/cmd/otel-allocator/README.md index 3339ccd5cc..e46fcd684d 100644 --- a/cmd/otel-allocator/README.md +++ b/cmd/otel-allocator/README.md @@ -138,6 +138,12 @@ the `targetAllocator:` part of the OpenTelemetryCollector CR. **Note**: The Collector part of this same CR *also* has a serviceAccount key which only affects the collector and *not* the TargetAllocator. +### Service / Pod monitor endpoint credentials + +If your service or pod monitor endpoints require credentials or another supported form of authentication (bearer token, basic auth, OAuth2, etc.), you need to ensure that the collector has access to this information. Due to limitations in how the endpoints configuration is handled, the target allocator currently does **not** support credentials provided via secrets. It is only possible to provide credentials in a file (for more details, see issue https://github.com/open-telemetry/opentelemetry-operator/issues/1669). + +To ensure your endpoints can be scraped, your collector instance needs to have the relevant secret mounted as a file at the correct path.
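For example, a bearer-token secret can be mounted into the collector through the `volumes` and `volumeMounts` fields of the OpenTelemetryCollector CR. The sketch below is a minimal, hypothetical example: the secret name (`scrape-credentials`) and mount path are placeholders and must match the file path that your monitor's authentication settings reference:

```yaml
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: collector-with-ta
spec:
  mode: statefulset
  targetAllocator:
    enabled: true
    prometheusCR:
      enabled: true
  # Hypothetical secret holding the credential file; the monitor's scrape
  # configuration must point at the mounted path below.
  volumes:
    - name: scrape-credentials
      secret:
        secretName: scrape-credentials
  volumeMounts:
    - name: scrape-credentials
      mountPath: /etc/scrape-credentials
      readOnly: true
```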
+ # Design diff --git a/cmd/otel-allocator/main.go b/cmd/otel-allocator/main.go index a33d31b48b..48f11403b2 100644 --- a/cmd/otel-allocator/main.go +++ b/cmd/otel-allocator/main.go @@ -107,7 +107,7 @@ func main() { defer close(interrupts) if *cliConf.PromCRWatcherConf.Enabled { - promWatcher, err = allocatorWatcher.NewPrometheusCRWatcher(cfg, cliConf) + promWatcher, err = allocatorWatcher.NewPrometheusCRWatcher(setupLog.WithName("prometheus-cr-watcher"), cfg, cliConf) if err != nil { setupLog.Error(err, "Can't start the prometheus watcher") os.Exit(1) @@ -193,7 +193,7 @@ func main() { select { case event := <-eventChan: eventsMetric.WithLabelValues(event.Source.String()).Inc() - loadConfig, err := event.Watcher.LoadConfig() + loadConfig, err := event.Watcher.LoadConfig(ctx) if err != nil { setupLog.Error(err, "Unable to load configuration") continue diff --git a/cmd/otel-allocator/watcher/file.go b/cmd/otel-allocator/watcher/file.go index df561b9188..95b5cfaaa1 100644 --- a/cmd/otel-allocator/watcher/file.go +++ b/cmd/otel-allocator/watcher/file.go @@ -15,6 +15,7 @@ package watcher import ( + "context" "path/filepath" "github.com/fsnotify/fsnotify" @@ -48,7 +49,7 @@ func NewFileWatcher(logger logr.Logger, config config.CLIConfig) (*FileWatcher, }, nil } -func (f *FileWatcher) LoadConfig() (*promconfig.Config, error) { +func (f *FileWatcher) LoadConfig(_ context.Context) (*promconfig.Config, error) { cfg, err := config.Load(f.configFilePath) if err != nil { f.logger.Error(err, "Unable to load configuration") diff --git a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go index 128a11b38e..9f26a9923e 100644 --- a/cmd/otel-allocator/watcher/promOperator.go +++ b/cmd/otel-allocator/watcher/promOperator.go @@ -15,9 +15,11 @@ package watcher import ( + "context" "fmt" "github.com/go-kit/log" + "github.com/go-logr/logr" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" promv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1" "github.com/prometheus-operator/prometheus-operator/pkg/assets" @@ -29,34 +31,30 @@ import ( "gopkg.in/yaml.v2" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" allocatorconfig "github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/config" ) -func NewPrometheusCRWatcher(cfg allocatorconfig.Config, cliConfig allocatorconfig.CLIConfig) (*PrometheusCRWatcher, error) { +func NewPrometheusCRWatcher(logger logr.Logger, cfg allocatorconfig.Config, cliConfig allocatorconfig.CLIConfig) (*PrometheusCRWatcher, error) { mClient, err := monitoringclient.NewForConfig(cliConfig.ClusterConfig) if err != nil { return nil, err } - factory := informers.NewMonitoringInformerFactories(map[string]struct{}{v1.NamespaceAll: {}}, map[string]struct{}{}, mClient, allocatorconfig.DefaultResyncTime, nil) //TODO decide what strategy to use regarding namespaces - - serviceMonitorInformers, err := informers.NewInformersForResource(factory, monitoringv1.SchemeGroupVersion.WithResource(monitoringv1.ServiceMonitorName)) + clientset, err := kubernetes.NewForConfig(cliConfig.ClusterConfig) if err != nil { return nil, err } - podMonitorInformers, err := informers.NewInformersForResource(factory, monitoringv1.SchemeGroupVersion.WithResource(monitoringv1.PodMonitorName)) + factory := informers.NewMonitoringInformerFactories(map[string]struct{}{v1.NamespaceAll: {}}, map[string]struct{}{}, mClient, 
allocatorconfig.DefaultResyncTime, nil) //TODO decide what strategy to use regarding namespaces + + monitoringInformers, err := getInformers(factory) if err != nil { return nil, err } - monitoringInformers := map[string]*informers.ForResource{ - monitoringv1.ServiceMonitorName: serviceMonitorInformers, - monitoringv1.PodMonitorName: podMonitorInformers, - } - // TODO: We should make these durations configurable prom := &monitoringv1.Prometheus{ Spec: monitoringv1.PrometheusSpec{ @@ -76,7 +74,9 @@ func NewPrometheusCRWatcher(cfg allocatorconfig.Config, cliConfig allocatorconfi podMonSelector := getSelector(cfg.PodMonitorSelector) return &PrometheusCRWatcher{ + logger: logger, kubeMonitoringClient: mClient, + k8sClient: clientset, informers: monitoringInformers, stopChannel: make(chan struct{}), configGenerator: generator, @@ -87,7 +87,9 @@ func NewPrometheusCRWatcher(cfg allocatorconfig.Config, cliConfig allocatorconfi } type PrometheusCRWatcher struct { - kubeMonitoringClient *monitoringclient.Clientset + logger logr.Logger + kubeMonitoringClient monitoringclient.Interface + k8sClient kubernetes.Interface informers map[string]*informers.ForResource stopChannel chan struct{} configGenerator *prometheus.ConfigGenerator @@ -98,13 +100,30 @@ type PrometheusCRWatcher struct { } func getSelector(s map[string]string) labels.Selector { - sel := labels.NewSelector() if s == nil { - return sel + return labels.NewSelector() } return labels.SelectorFromSet(s) } +// getInformers returns a map of informers for the given resources. +func getInformers(factory informers.FactoriesForNamespaces) (map[string]*informers.ForResource, error) { + serviceMonitorInformers, err := informers.NewInformersForResource(factory, monitoringv1.SchemeGroupVersion.WithResource(monitoringv1.ServiceMonitorName)) + if err != nil { + return nil, err + } + + podMonitorInformers, err := informers.NewInformersForResource(factory, monitoringv1.SchemeGroupVersion.WithResource(monitoringv1.PodMonitorName)) + if err != nil { + return nil, err + } + + return map[string]*informers.ForResource{ + monitoringv1.ServiceMonitorName: serviceMonitorInformers, + monitoringv1.PodMonitorName: podMonitorInformers, + }, nil +} + // Watch wrapped informers and wait for an initial sync. 
func (w *PrometheusCRWatcher) Watch(upstreamEvents chan Event, upstreamErrors chan error) error { event := Event{ @@ -143,12 +162,13 @@ func (w *PrometheusCRWatcher) Close() error { return nil } -func (w *PrometheusCRWatcher) LoadConfig() (*promconfig.Config, error) { +func (w *PrometheusCRWatcher) LoadConfig(ctx context.Context) (*promconfig.Config, error) { + store := assets.NewStore(w.k8sClient.CoreV1(), w.k8sClient.CoreV1()) serviceMonitorInstances := make(map[string]*monitoringv1.ServiceMonitor) - smRetrieveErr := w.informers[monitoringv1.ServiceMonitorName].ListAll(w.serviceMonitorSelector, func(sm interface{}) { monitor := sm.(*monitoringv1.ServiceMonitor) key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(monitor) + w.addStoreAssetsForServiceMonitor(ctx, monitor.Name, monitor.Namespace, monitor.Spec.Endpoints, store) serviceMonitorInstances[key] = monitor }) if smRetrieveErr != nil { @@ -159,19 +179,13 @@ func (w *PrometheusCRWatcher) LoadConfig() (*promconfig.Config, error) { pmRetrieveErr := w.informers[monitoringv1.PodMonitorName].ListAll(w.podMonitorSelector, func(pm interface{}) { monitor := pm.(*monitoringv1.PodMonitor) key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(monitor) + w.addStoreAssetsForPodMonitor(ctx, monitor.Name, monitor.Namespace, monitor.Spec.PodMetricsEndpoints, store) podMonitorInstances[key] = monitor }) if pmRetrieveErr != nil { return nil, pmRetrieveErr } - store := assets.Store{ - TLSAssets: nil, - TokenAssets: nil, - BasicAuthAssets: nil, - OAuth2Assets: nil, - SigV4Assets: nil, - } generatedConfig, err := w.configGenerator.GenerateServerConfiguration( "30s", "", @@ -184,7 +198,7 @@ func (w *PrometheusCRWatcher) LoadConfig() (*promconfig.Config, error) { podMonitorInstances, map[string]*monitoringv1.Probe{}, map[string]*promv1alpha1.ScrapeConfig{}, - &store, + store, nil, nil, nil, @@ -211,3 +225,89 @@ func (w *PrometheusCRWatcher) LoadConfig() (*promconfig.Config, error) { } return promCfg, nil } + +// addStoreAssetsForServiceMonitor adds authentication / authorization related information to the assets store, +// based on the service monitor and endpoints specs. +// This code borrows from +// https://github.com/prometheus-operator/prometheus-operator/blob/06b5c4189f3f72737766d86103d049115c3aff48/pkg/prometheus/resource_selector.go#L73. 
+func (w *PrometheusCRWatcher) addStoreAssetsForServiceMonitor( + ctx context.Context, + smName, smNamespace string, + endps []monitoringv1.Endpoint, + store *assets.Store, +) { + var err error + for i, endp := range endps { + objKey := fmt.Sprintf("serviceMonitor/%s/%s/%d", smNamespace, smName, i) + + if err = store.AddBearerToken(ctx, smNamespace, endp.BearerTokenSecret, objKey); err != nil { + break + } + + if err = store.AddBasicAuth(ctx, smNamespace, endp.BasicAuth, objKey); err != nil { + break + } + + if endp.TLSConfig != nil { + if err = store.AddTLSConfig(ctx, smNamespace, endp.TLSConfig); err != nil { + break + } + } + + if err = store.AddOAuth2(ctx, smNamespace, endp.OAuth2, objKey); err != nil { + break + } + + smAuthKey := fmt.Sprintf("serviceMonitor/auth/%s/%s/%d", smNamespace, smName, i) + if err = store.AddSafeAuthorizationCredentials(ctx, smNamespace, endp.Authorization, smAuthKey); err != nil { + break + } + } + + if err != nil { + w.logger.Error(err, "Failed to obtain credentials for a ServiceMonitor", "serviceMonitor", smName) + } +} + +// addStoreAssetsForPodMonitor adds authentication / authorization related information to the assets store, +// based on the pod monitor and pod metrics endpoints specs. +// This code borrows from +// https://github.com/prometheus-operator/prometheus-operator/blob/06b5c4189f3f72737766d86103d049115c3aff48/pkg/prometheus/resource_selector.go#L314. +func (w *PrometheusCRWatcher) addStoreAssetsForPodMonitor( + ctx context.Context, + pmName, pmNamespace string, + podMetricsEndps []monitoringv1.PodMetricsEndpoint, + store *assets.Store, +) { + var err error + for i, endp := range podMetricsEndps { + objKey := fmt.Sprintf("podMonitor/%s/%s/%d", pmNamespace, pmName, i) + + if err = store.AddBearerToken(ctx, pmNamespace, endp.BearerTokenSecret, objKey); err != nil { + break + } + + if err = store.AddBasicAuth(ctx, pmNamespace, endp.BasicAuth, objKey); err != nil { + break + } + + if endp.TLSConfig != nil { + if err = store.AddSafeTLSConfig(ctx, pmNamespace, &endp.TLSConfig.SafeTLSConfig); err != nil { + break + } + } + + if err = store.AddOAuth2(ctx, pmNamespace, endp.OAuth2, objKey); err != nil { + break + } + + pmAuthKey := fmt.Sprintf("podMonitor/auth/%s/%s/%d", pmNamespace, pmName, i) + if err = store.AddSafeAuthorizationCredentials(ctx, pmNamespace, endp.Authorization, pmAuthKey); err != nil { + break + } + } + + if err != nil { + w.logger.Error(err, "Failed to obtain credentials for a PodMonitor", "podMonitor", pmName) + } +} diff --git a/cmd/otel-allocator/watcher/promOperator_test.go b/cmd/otel-allocator/watcher/promOperator_test.go new file mode 100644 index 0000000000..aafdfe35c0 --- /dev/null +++ b/cmd/otel-allocator/watcher/promOperator_test.go @@ -0,0 +1,345 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package watcher + +import ( + "context" + "testing" + "time" + + "github.com/go-kit/log" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + fakemonitoringclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake" + "github.com/prometheus-operator/prometheus-operator/pkg/informers" + "github.com/prometheus-operator/prometheus-operator/pkg/prometheus" + "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + promconfig "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery" + kubeDiscovery "github.com/prometheus/prometheus/discovery/kubernetes" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +func TestLoadConfig(t *testing.T) { + tests := []struct { + name string + serviceMonitor *monitoringv1.ServiceMonitor + podMonitor *monitoringv1.PodMonitor + want *promconfig.Config + wantErr bool + }{ + { + name: "simple test", + serviceMonitor: &monitoringv1.ServiceMonitor{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple", + Namespace: "test", + }, + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: "test", + Endpoints: []monitoringv1.Endpoint{ + { + Port: "web", + }, + }, + }, + }, + podMonitor: &monitoringv1.PodMonitor{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple", + Namespace: "test", + }, + Spec: monitoringv1.PodMonitorSpec{ + JobLabel: "test", + PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ + { + Port: "web", + }, + }, + }, + }, + want: &promconfig.Config{ + ScrapeConfigs: []*promconfig.ScrapeConfig{ + { + JobName: "serviceMonitor/test/simple/0", + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + HonorLabels: false, + Scheme: "http", + MetricsPath: "/metrics", + ServiceDiscoveryConfigs: []discovery.Config{ + &kubeDiscovery.SDConfig{ + Role: "endpointslice", + NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{ + Names: []string{"test"}, + IncludeOwnNamespace: false, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + { + JobName: "podMonitor/test/simple/0", + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + HonorLabels: false, + Scheme: "http", + MetricsPath: "/metrics", + ServiceDiscoveryConfigs: []discovery.Config{ + &kubeDiscovery.SDConfig{ + Role: "pod", + NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{ + Names: []string{"test"}, + IncludeOwnNamespace: false, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + }, + }, + { + name: "basic auth (serviceMonitor)", + serviceMonitor: &monitoringv1.ServiceMonitor{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth", + Namespace: "test", + }, + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: "auth", + Endpoints: []monitoringv1.Endpoint{ + { + Port: "web", + BasicAuth: &monitoringv1.BasicAuth{ + Username: v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "basic-auth", + }, + Key: "username", + }, + Password: v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "basic-auth", + }, + Key: "password", + }, + }, + }, + }, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "auth", + }, + }, + }, + }, + want: 
&promconfig.Config{ + GlobalConfig: promconfig.GlobalConfig{}, + ScrapeConfigs: []*promconfig.ScrapeConfig{ + { + JobName: "serviceMonitor/test/auth/0", + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + HonorLabels: false, + Scheme: "http", + MetricsPath: "/metrics", + ServiceDiscoveryConfigs: []discovery.Config{ + &kubeDiscovery.SDConfig{ + Role: "endpointslice", + NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{ + Names: []string{"test"}, + IncludeOwnNamespace: false, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + HTTPClientConfig: config.HTTPClientConfig{ + FollowRedirects: true, + EnableHTTP2: true, + BasicAuth: &config.BasicAuth{ + Username: "admin", + Password: "password", + }, + }, + }, + }, + }, + }, + { + name: "bearer token (podMonitor)", + podMonitor: &monitoringv1.PodMonitor{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bearer", + Namespace: "test", + }, + Spec: monitoringv1.PodMonitorSpec{ + JobLabel: "bearer", + PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ + { + Port: "web", + BearerTokenSecret: v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "bearer", + }, + Key: "token", + }, + }, + }, + }, + }, + want: &promconfig.Config{ + GlobalConfig: promconfig.GlobalConfig{}, + ScrapeConfigs: []*promconfig.ScrapeConfig{ + { + JobName: "podMonitor/test/bearer/0", + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + HonorLabels: false, + Scheme: "http", + MetricsPath: "/metrics", + ServiceDiscoveryConfigs: []discovery.Config{ + &kubeDiscovery.SDConfig{ + Role: "pod", + NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{ + Names: []string{"test"}, + IncludeOwnNamespace: false, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + HTTPClientConfig: config.HTTPClientConfig{ + FollowRedirects: true, + EnableHTTP2: true, + Authorization: &config.Authorization{ + Type: "Bearer", + Credentials: "bearer-token", + }, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + w := getTestPrometheuCRWatcher(t, tt.serviceMonitor, tt.podMonitor) + for _, informer := range w.informers { + // Start informers in order to populate cache. + informer.Start(w.stopChannel) + } + + // Wait for informers to sync. + for _, informer := range w.informers { + for !informer.HasSynced() { + time.Sleep(50 * time.Millisecond) + } + } + + got, err := w.LoadConfig(context.Background()) + assert.NoError(t, err) + + sanitizeScrapeConfigsForTest(got.ScrapeConfigs) + assert.Equal(t, tt.want.ScrapeConfigs, got.ScrapeConfigs) + }) + } +} + +// getTestPrometheuCRWatcher creates a test instance of PrometheusCRWatcher with fake clients +// and test secrets. 
+func getTestPrometheuCRWatcher(t *testing.T, sm *monitoringv1.ServiceMonitor, pm *monitoringv1.PodMonitor) *PrometheusCRWatcher { + mClient := fakemonitoringclient.NewSimpleClientset() + if sm != nil { + _, err := mClient.MonitoringV1().ServiceMonitors("test").Create(context.Background(), sm, metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + } + if pm != nil { + _, err := mClient.MonitoringV1().PodMonitors("test").Create(context.Background(), pm, metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + } + + k8sClient := fake.NewSimpleClientset() + _, err := k8sClient.CoreV1().Secrets("test").Create(context.Background(), &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "basic-auth", + Namespace: "test", + }, + Data: map[string][]byte{"username": []byte("admin"), "password": []byte("password")}, + }, metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + _, err = k8sClient.CoreV1().Secrets("test").Create(context.Background(), &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bearer", + Namespace: "test", + }, + Data: map[string][]byte{"token": []byte("bearer-token")}, + }, metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + + factory := informers.NewMonitoringInformerFactories(map[string]struct{}{v1.NamespaceAll: {}}, map[string]struct{}{}, mClient, 0, nil) + informers, err := getInformers(factory) + if err != nil { + t.Fatal(err) + } + + prom := &monitoringv1.Prometheus{ + Spec: monitoringv1.PrometheusSpec{ + CommonPrometheusFields: monitoringv1.CommonPrometheusFields{ + ScrapeInterval: monitoringv1.Duration("30s"), + }, + }, + } + + generator, err := prometheus.NewConfigGenerator(log.NewNopLogger(), prom, true) + if err != nil { + t.Fatal(err) + } + + return &PrometheusCRWatcher{ + kubeMonitoringClient: mClient, + k8sClient: k8sClient, + informers: informers, + configGenerator: generator, + serviceMonitorSelector: getSelector(nil), + podMonitorSelector: getSelector(nil), + stopChannel: make(chan struct{}), + } +} + +// Remove relabel config fields from scrape configs for testing, +// since these are mutated and tested down the line with the hook(s). +func sanitizeScrapeConfigsForTest(scs []*promconfig.ScrapeConfig) { + for _, sc := range scs { + sc.RelabelConfigs = nil + sc.MetricRelabelConfigs = nil + } +} diff --git a/cmd/otel-allocator/watcher/watcher.go b/cmd/otel-allocator/watcher/watcher.go index a1b0e99fa8..c970c7e47f 100644 --- a/cmd/otel-allocator/watcher/watcher.go +++ b/cmd/otel-allocator/watcher/watcher.go @@ -14,12 +14,16 @@ package watcher -import promconfig "github.com/prometheus/prometheus/config" +import ( + "context", + + promconfig "github.com/prometheus/prometheus/config" +) type Watcher interface { // Watch watcher and supply channels which will receive change events Watch(upstreamEvents chan Event, upstreamErrors chan error) error - LoadConfig() (*promconfig.Config, error) + LoadConfig(ctx context.Context) (*promconfig.Config, error) Close() error }
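A usage note on the `Watcher` interface change above: every implementation must now accept a context in `LoadConfig`, which the Prometheus CR watcher uses for the Kubernetes API calls that resolve secret references. A minimal sketch of a conforming implementation (hypothetical, for illustration only):

```go
package watcher

import (
	"context"

	promconfig "github.com/prometheus/prometheus/config"
)

// staticWatcher is a hypothetical Watcher that always returns a fixed
// configuration. It only illustrates the updated interface shape and is
// not part of this change.
type staticWatcher struct {
	cfg *promconfig.Config
}

// Watch is a no-op: a static configuration never emits change events.
func (s *staticWatcher) Watch(upstreamEvents chan Event, upstreamErrors chan error) error {
	return nil
}

// LoadConfig receives a context so implementations that call external APIs
// (like the Prometheus CR watcher fetching secrets) can honor cancellation;
// a static watcher simply ignores it.
func (s *staticWatcher) LoadConfig(_ context.Context) (*promconfig.Config, error) {
	return s.cfg, nil
}

func (s *staticWatcher) Close() error {
	return nil
}
```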