From 20ea43a43bbe5ba4a512ffc7742a92d14cc3d7d9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Miko=C5=82aj=20=C5=9Awi=C4=85tek?=
Date: Thu, 13 Apr 2023 18:53:59 +0000
Subject: [PATCH] [target-allocator] fix updating scrape configs (#1620)

* [target-allocator] fix updating scrape configs

Only take scrape configs from the most recently applied config, and only
save them if we've successfully updated.

* [target-allocator] drop unnecessary job to scrape config map

* [target-allocator] add discoverer benchmark

---
 ...targetallocator_update-scrape-configs.yaml | 16 +++++
 cmd/otel-allocator/target/discovery.go        |  9 ++-
 cmd/otel-allocator/target/discovery_test.go   | 68 +++++++++++++++++--
 cmd/otel-allocator/target/testdata/test.yaml  |  3 +
 4 files changed, 86 insertions(+), 10 deletions(-)
 create mode 100755 .chloggen/fix_targetallocator_update-scrape-configs.yaml

diff --git a/.chloggen/fix_targetallocator_update-scrape-configs.yaml b/.chloggen/fix_targetallocator_update-scrape-configs.yaml
new file mode 100755
index 0000000000..7c4eb4718a
--- /dev/null
+++ b/.chloggen/fix_targetallocator_update-scrape-configs.yaml
@@ -0,0 +1,16 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. operator, target allocator, github action)
+component: target allocator
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: fix updating scrape configs
+
+# One or more tracking issues related to the change
+issues: [1415]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
diff --git a/cmd/otel-allocator/target/discovery.go b/cmd/otel-allocator/target/discovery.go
index 9b76399709..38d99b7985 100644
--- a/cmd/otel-allocator/target/discovery.go
+++ b/cmd/otel-allocator/target/discovery.go
@@ -39,7 +39,6 @@ type Discoverer struct {
 	manager              *discovery.Manager
 	close                chan struct{}
 	configsMap           map[allocatorWatcher.EventSource]*config.Config
-	jobToScrapeConfig    map[string]*config.ScrapeConfig
 	hook                 discoveryHook
 	scrapeConfigsHash    uint64
 	scrapeConfigsUpdater scrapeConfigsUpdater
@@ -59,7 +58,6 @@ func NewDiscoverer(log logr.Logger, manager *discovery.Manager, hook discoveryHo
 		manager:              manager,
 		close:                make(chan struct{}),
 		configsMap:           make(map[allocatorWatcher.EventSource]*config.Config),
-		jobToScrapeConfig:    make(map[string]*config.ScrapeConfig),
 		hook:                 hook,
 		scrapeConfigsUpdater: scrapeConfigsUpdater,
 	}
@@ -67,26 +65,27 @@ func NewDiscoverer(log logr.Logger, manager *discovery.Manager, hook discoveryHo
 
 func (m *Discoverer) ApplyConfig(source allocatorWatcher.EventSource, cfg *config.Config) error {
 	m.configsMap[source] = cfg
+	jobToScrapeConfig := make(map[string]*config.ScrapeConfig)
 
 	discoveryCfg := make(map[string]discovery.Configs)
 	relabelCfg := make(map[string][]*relabel.Config)
 
 	for _, value := range m.configsMap {
 		for _, scrapeConfig := range value.ScrapeConfigs {
-			m.jobToScrapeConfig[scrapeConfig.JobName] = scrapeConfig
+			jobToScrapeConfig[scrapeConfig.JobName] = scrapeConfig
 			discoveryCfg[scrapeConfig.JobName] = scrapeConfig.ServiceDiscoveryConfigs
 			relabelCfg[scrapeConfig.JobName] = scrapeConfig.RelabelConfigs
 		}
 	}
 
-	hash, err := hashstructure.Hash(m.jobToScrapeConfig, nil)
+	hash, err := hashstructure.Hash(jobToScrapeConfig, nil)
 	if err != nil {
 		return err
 	}
 	// If the hash has changed, updated stored hash and send the new config.
 	// Otherwise skip updating scrape configs.
 	if m.scrapeConfigsUpdater != nil && m.scrapeConfigsHash != hash {
-		err := m.scrapeConfigsUpdater.UpdateScrapeConfigResponse(m.jobToScrapeConfig)
+		err := m.scrapeConfigsUpdater.UpdateScrapeConfigResponse(jobToScrapeConfig)
 		if err != nil {
 			return err
 		}
diff --git a/cmd/otel-allocator/target/discovery_test.go b/cmd/otel-allocator/target/discovery_test.go
index 6230c958a0..de2f1070e6 100644
--- a/cmd/otel-allocator/target/discovery_test.go
+++ b/cmd/otel-allocator/target/discovery_test.go
@@ -28,6 +28,7 @@ import (
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/model/relabel"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	"github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/config"
@@ -48,7 +49,7 @@ func TestDiscovery(t *testing.T) {
 			args: args{
 				file: "./testdata/test.yaml",
 			},
-			want: []string{"prom.domain:9001", "prom.domain:9002", "prom.domain:9003", "promfile.domain:1001", "promfile.domain:3000"},
+			want: []string{"prom.domain:9001", "prom.domain:9002", "prom.domain:9003", "prom.domain:8001", "promfile.domain:1001", "promfile.domain:3000"},
 		},
 		{
 			name: "update",
@@ -58,9 +59,10 @@ func TestDiscovery(t *testing.T) {
 			want: []string{"prom.domain:9004", "prom.domain:9005", "promfile.domain:1001", "promfile.domain:3000"},
 		},
 	}
+	scu := &mockScrapeConfigUpdater{}
 	ctx, cancelFunc := context.WithCancel(context.Background())
 	d := discovery.NewManager(ctx, gokitlog.NewNopLogger())
-	manager := NewDiscoverer(ctrl.Log.WithName("test"), d, nil, nil)
+	manager := NewDiscoverer(ctrl.Log.WithName("test"), d, nil, scu)
 	defer close(manager.close)
 	defer cancelFunc()
 
@@ -91,6 +93,13 @@ func TestDiscovery(t *testing.T) {
 			sort.Strings(gotTargets)
 			sort.Strings(tt.want)
 			assert.Equal(t, tt.want, gotTargets)
+
+			// check the updated scrape configs
+			expectedScrapeConfigs := map[string]*promconfig.ScrapeConfig{}
+			for _, scrapeConfig := range cfg.Config.ScrapeConfigs {
+				expectedScrapeConfigs[scrapeConfig.JobName] = scrapeConfig
+			}
+			assert.Equal(t, expectedScrapeConfigs, scu.mockCfg)
 		})
 	}
 }
@@ -286,6 +295,7 @@ func TestDiscovery_ScrapeConfigHashing(t *testing.T) {
 	}
 	var (
 		lastValidHash   uint64
+		expectedConfig  map[string]*promconfig.ScrapeConfig
 		lastValidConfig map[string]*promconfig.ScrapeConfig
 	)
 
@@ -298,20 +308,25 @@ func TestDiscovery_ScrapeConfigHashing(t *testing.T) {
 		t.Run(tc.description, func(t *testing.T) {
 			err := manager.ApplyConfig(allocatorWatcher.EventSourcePrometheusCR, tc.cfg)
 			if !tc.expectErr {
+				expectedConfig = make(map[string]*promconfig.ScrapeConfig)
+				for _, value := range manager.configsMap {
+					for _, scrapeConfig := range value.ScrapeConfigs {
+						expectedConfig[scrapeConfig.JobName] = scrapeConfig
+					}
+				}
 				assert.NoError(t, err)
 				assert.NotZero(t, manager.scrapeConfigsHash)
 				// Assert that scrape configs in manager are correctly
 				// reflected in the scrape job updater.
-				assert.Equal(t, manager.jobToScrapeConfig, scu.mockCfg)
+				assert.Equal(t, expectedConfig, scu.mockCfg)
 
 				lastValidHash = manager.scrapeConfigsHash
-				lastValidConfig = manager.jobToScrapeConfig
+				lastValidConfig = expectedConfig
 			} else {
 				// In case of error, assert that we retain the last
 				// known valid config.
 				assert.Error(t, err)
 				assert.Equal(t, lastValidHash, manager.scrapeConfigsHash)
-				assert.Equal(t, lastValidConfig, manager.jobToScrapeConfig)
 				assert.Equal(t, lastValidConfig, scu.mockCfg)
 			}
 		})
@@ -319,6 +334,49 @@ func TestDiscovery_ScrapeConfigHashing(t *testing.T) {
 	}
 }
 
+func BenchmarkApplyScrapeConfig(b *testing.B) {
+	numConfigs := 1000
+	scrapeConfig := promconfig.ScrapeConfig{
+		JobName:          "serviceMonitor/testapp/testapp/0",
+		HonorTimestamps:  true,
+		ScrapeInterval:   model.Duration(30 * time.Second),
+		ScrapeTimeout:    model.Duration(30 * time.Second),
+		MetricsPath:      "/metrics",
+		Scheme:           "http",
+		HTTPClientConfig: commonconfig.HTTPClientConfig{
+			FollowRedirects: true,
+		},
+		RelabelConfigs: []*relabel.Config{
+			{
+				SourceLabels: model.LabelNames{model.LabelName("job")},
+				Separator:    ";",
+				Regex:        relabel.MustNewRegexp("(.*)"),
+				TargetLabel:  "__tmp_prometheus_job_name",
+				Replacement:  "$$1",
+				Action:       relabel.Replace,
+			},
+		},
+	}
+	cfg := &promconfig.Config{
+		ScrapeConfigs: make([]*promconfig.ScrapeConfig, numConfigs),
+	}
+
+	for i := 0; i < numConfigs; i++ {
+		cfg.ScrapeConfigs[i] = &scrapeConfig
+	}
+
+	scu := &mockScrapeConfigUpdater{}
+	ctx := context.Background()
+	d := discovery.NewManager(ctx, gokitlog.NewNopLogger())
+	manager := NewDiscoverer(ctrl.Log.WithName("test"), d, nil, scu)
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		err := manager.ApplyConfig(allocatorWatcher.EventSourcePrometheusCR, cfg)
+		require.NoError(b, err)
+	}
+}
+
 var _ scrapeConfigsUpdater = &mockScrapeConfigUpdater{}
 
 // mockScrapeConfigUpdater is a mock implementation of the scrapeConfigsUpdater.
diff --git a/cmd/otel-allocator/target/testdata/test.yaml b/cmd/otel-allocator/target/testdata/test.yaml
index b6d1cf2f7d..d38b0183a7 100644
--- a/cmd/otel-allocator/target/testdata/test.yaml
+++ b/cmd/otel-allocator/target/testdata/test.yaml
@@ -12,3 +12,6 @@ config:
     - targets: ["prom.domain:9001", "prom.domain:9002", "prom.domain:9003"]
       labels:
         my: label
+  - job_name: prometheus2
+    static_configs:
+      - targets: ["prom.domain:8001"]
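
The core of the discovery.go change is that ApplyConfig now rebuilds the job-to-scrape-config map from every stored config on each call, hashes the result, and only persists the new hash after the updater call succeeds, so a failed push is retried on the next apply and jobs removed from a source's latest config no longer linger. What follows is a minimal, self-contained Go sketch of that pattern under assumed names (scrapeConfig, promCfg, discoverer, printUpdater are simplified stand-ins, not the operator's real types); only the mitchellh/hashstructure call mirrors the patched code.

// Sketch of the rebuild-then-hash-then-update pattern adopted by this patch.
// All type and function names here are illustrative stand-ins.
package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

type scrapeConfig struct {
	JobName     string
	MetricsPath string
}

type promCfg struct {
	ScrapeConfigs []*scrapeConfig
}

// updater plays the role of the allocator's scrapeConfigsUpdater interface.
type updater interface {
	UpdateScrapeConfigResponse(map[string]*scrapeConfig) error
}

type discoverer struct {
	configsMap        map[string]*promCfg // keyed by event source in the real code
	scrapeConfigsHash uint64
	updater           updater
}

func (d *discoverer) applyConfig(source string, cfg *promCfg) error {
	d.configsMap[source] = cfg

	// Rebuild the map from scratch instead of mutating a long-lived field, so
	// jobs dropped from a source's latest config disappear from the result.
	jobToScrapeConfig := make(map[string]*scrapeConfig)
	for _, c := range d.configsMap {
		for _, sc := range c.ScrapeConfigs {
			jobToScrapeConfig[sc.JobName] = sc
		}
	}

	hash, err := hashstructure.Hash(jobToScrapeConfig, nil)
	if err != nil {
		return err
	}
	if d.updater != nil && hash != d.scrapeConfigsHash {
		if err := d.updater.UpdateScrapeConfigResponse(jobToScrapeConfig); err != nil {
			// The stored hash is left untouched, so the next apply retries the push.
			return err
		}
		d.scrapeConfigsHash = hash
	}
	return nil
}

// printUpdater just logs the pushed jobs; it stands in for the allocator's HTTP server.
type printUpdater struct{}

func (printUpdater) UpdateScrapeConfigResponse(m map[string]*scrapeConfig) error {
	fmt.Println("pushed", len(m), "scrape configs")
	return nil
}

func main() {
	d := &discoverer{configsMap: map[string]*promCfg{}, updater: printUpdater{}}
	cfg := &promCfg{ScrapeConfigs: []*scrapeConfig{{JobName: "app", MetricsPath: "/metrics"}}}
	_ = d.applyConfig("promCR", cfg)
	// Applying an identical config hashes to the same value, so nothing is pushed again.
	_ = d.applyConfig("promCR", cfg)
}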