From 12cf59d52680a6abc1395cbb6788c3fde82c28e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20Mathieu?= Date: Wed, 23 Jan 2019 11:25:08 +0100 Subject: [PATCH] [corechecks] Support custom tags in every corechecks (#2723) * [aggregator] store custom tags in a checkSender. This is mainly used to attach tags read in checks configuration to an instantiated sender. Those custom tags will be appended to each send (event, metric, service). * [aggregator] unit tests for custom tags in `checkSender`. * [corechecks] set custom tags read in check config to the sender. * [corechecks] unify the usage of checks custom tags. Some checks were already appending their custom tags to the metrics, events and services check sent. This commit removes this as it's now done by the `checkSender`. * [corechecks] comment about the tags configuration field while implementing a new core check. * [corechecks] missed some manually added custom tags in Docker check. * [corechecks] missed some manually added custom tags in kube & snmp unit tests. * [corechecks] missed some manually added custom tags in network (linux) and kube apiserver unit tests. * [corechecks] custom tags unit/inte tests changes due to new implementation. * [docs] add `tags` field into agent/config.md * [docs] revert a typo fix to avoid merge conflict. * Add release note entry. * [corechecks] remove some manually added custom tags in containerd check. * [corechecks] no need to check for tags added in the configuration during tests. * Restore instance tags in integration tests. * Review feedbacks on comments and improvements. * Revert "Restore instance tags in integration tests." This reverts commit 17c2a76e9d04db7d217ab0cfaf9c4704e617550d. 
--- docs/agent/config.md | 1 + pkg/aggregator/mocksender/mocked_methods.go | 5 + pkg/aggregator/mocksender/mocksender.go | 1 + pkg/aggregator/sender.go | 14 +- pkg/aggregator/sender_test.go | 120 ++++++++++++++++++ pkg/autodiscovery/integration/config.go | 9 +- pkg/collector/corechecks/checkbase.go | 14 ++ .../cluster/kubernetes_apiserver.go | 4 +- .../cluster/kubernetes_apiserver_test.go | 24 ++-- .../cluster/kubernetes_openshift.go | 2 +- .../cluster/kubernetes_openshift_test.go | 4 +- .../corechecks/containers/containerd.go | 17 +-- .../corechecks/containers/containerd_test.go | 12 +- pkg/collector/corechecks/containers/cri.go | 1 - pkg/collector/corechecks/containers/docker.go | 37 +++--- .../corechecks/containers/docker_events.go | 2 - pkg/collector/corechecks/net/network.go | 29 ++--- pkg/collector/corechecks/net/network_test.go | 15 +-- pkg/collector/corechecks/net/snmp.go | 6 +- pkg/collector/corechecks/net/snmp_test.go | 18 --- pkg/collector/corechecks/system/disk.go | 12 -- pkg/collector/corechecks/system/disk_nix.go | 7 +- ...rechecks-custom-tags-7b55a07c4b763735.yaml | 5 + .../corechecks/docker/basemetrics_test.go | 2 - .../corechecks/docker/events_test.go | 1 - .../corechecks/docker/exitcode_test.go | 1 - .../corechecks/docker/globalmetrics_test.go | 2 +- .../corechecks/docker/main_test.go | 3 - .../corechecks/docker/serviceup_test.go | 4 +- 29 files changed, 232 insertions(+), 140 deletions(-) create mode 100644 releasenotes/notes/corechecks-custom-tags-7b55a07c4b763735.yaml diff --git a/docs/agent/config.md b/docs/agent/config.md index 1a1969b6e93a2..2f1130c73ec90 100644 --- a/docs/agent/config.md +++ b/docs/agent/config.md @@ -97,6 +97,7 @@ advanced options in the `instance` section: that should run less frequently than the default 15 seconds interval * `empty_default_hostname`: submit metrics, events and service checks with no hostname when set to `true` +* `tags`: send custom tags in addition to the tags sent by the check. 
## Removed options diff --git a/pkg/aggregator/mocksender/mocked_methods.go b/pkg/aggregator/mocksender/mocked_methods.go index a9c8cd5aa5e29..b6928b8ccffb0 100644 --- a/pkg/aggregator/mocksender/mocked_methods.go +++ b/pkg/aggregator/mocksender/mocked_methods.go @@ -64,6 +64,11 @@ func (m *MockSender) Commit() { m.Called() } +//SetCheckCustomTags enables the set of check custom tags mock call. +func (m *MockSender) SetCheckCustomTags(tags []string) { + m.Called(tags) +} + //GetMetricStats enables the get metric stats mock call. func (m *MockSender) GetMetricStats() map[string]int64 { m.Called() diff --git a/pkg/aggregator/mocksender/mocksender.go b/pkg/aggregator/mocksender/mocksender.go index 1915e8134e81c..487ce998e03d5 100644 --- a/pkg/aggregator/mocksender/mocksender.go +++ b/pkg/aggregator/mocksender/mocksender.go @@ -51,6 +51,7 @@ func (m *MockSender) SetupAcceptAll() { m.On("Event", mock.AnythingOfType("metrics.Event")).Return() m.On("GetMetricStats", mock.AnythingOfType("map[string]int64")).Return() m.On("DisableDefaultHostname", mock.AnythingOfType("bool")).Return() + m.On("SetCheckCustomTags", mock.AnythingOfType("[]string")).Return() m.On("Commit").Return() } diff --git a/pkg/aggregator/sender.go b/pkg/aggregator/sender.go index effab53f42954..fd2788c3196af 100644 --- a/pkg/aggregator/sender.go +++ b/pkg/aggregator/sender.go @@ -35,6 +35,7 @@ type Sender interface { Event(e metrics.Event) GetMetricStats() map[string]int64 DisableDefaultHostname(disable bool) + SetCheckCustomTags(tags []string) } type metricStats struct { @@ -61,6 +62,7 @@ type checkSender struct { smsOut chan<- senderMetricSample serviceCheckOut chan<- metrics.ServiceCheck eventOut chan<- metrics.Event + checkTags []string } type senderMetricSample struct { @@ -152,6 +154,12 @@ func (s *checkSender) DisableDefaultHostname(disable bool) { s.defaultHostnameDisabled = disable } +// SetCheckCustomTags stores the tags set in the check configuration file. 
+// They will be appended to each send (metric, event and service) +func (s *checkSender) SetCheckCustomTags(tags []string) { + s.checkTags = tags +} + // Commit commits the metric samples that were added during a check run // Should be called at the end of every check run func (s *checkSender) Commit() { @@ -191,6 +199,8 @@ func (s *checkSender) SendRawMetricSample(sample *metrics.MetricSample) { } func (s *checkSender) sendMetricSample(metric string, value float64, hostname string, tags []string, mType metrics.MetricType) { + tags = append(tags, s.checkTags...) + log.Trace(mType.String(), " sample: ", metric, ": ", value, " for hostname: ", hostname, " tags: ", tags) metricSample := &metrics.MetricSample{ @@ -267,7 +277,7 @@ func (s *checkSender) ServiceCheck(checkName string, status metrics.ServiceCheck Status: status, Host: hostname, Ts: time.Now().Unix(), - Tags: tags, + Tags: append(tags, s.checkTags...), Message: message, } @@ -284,6 +294,8 @@ func (s *checkSender) ServiceCheck(checkName string, status metrics.ServiceCheck // Event submits an event func (s *checkSender) Event(e metrics.Event) { + e.Tags = append(e.Tags, s.checkTags...) 
+ log.Trace("Event submitted: ", e.Title, " for hostname: ", e.Host, " tags: ", e.Tags) if e.Host == "" && !s.defaultHostnameDisabled { diff --git a/pkg/aggregator/sender_test.go b/pkg/aggregator/sender_test.go index 3fdbac5fcc438..6b8f8387842d9 100644 --- a/pkg/aggregator/sender_test.go +++ b/pkg/aggregator/sender_test.go @@ -10,6 +10,7 @@ import ( "fmt" "sync" "testing" + "time" // 3p @@ -133,6 +134,125 @@ func TestGetSenderDefaultHostname(t *testing.T) { assert.Equal(t, false, checksender.defaultHostnameDisabled) } +func TestGetSenderAddCheckCustomTagsMetrics(t *testing.T) { + resetAggregator() + InitAggregator(nil, "testhostname", "") + + senderMetricSampleChan := make(chan senderMetricSample, 10) + serviceCheckChan := make(chan metrics.ServiceCheck, 10) + eventChan := make(chan metrics.Event, 10) + checkSender := newCheckSender(checkID1, "", senderMetricSampleChan, serviceCheckChan, eventChan) + + // no custom tags + checkSender.sendMetricSample("metric.test", 42.0, "testhostname", nil, metrics.CounterType) + sms := <-senderMetricSampleChan + assert.Nil(t, sms.metricSample.Tags) + + // only tags added by the check + checkTags := []string{"check:tag1", "check:tag2"} + checkSender.sendMetricSample("metric.test", 42.0, "testhostname", checkTags, metrics.CounterType) + sms = <-senderMetricSampleChan + assert.Equal(t, checkTags, sms.metricSample.Tags) + + // simulate tags in the configuration file + customTags := []string{"custom:tag1", "custom:tag2"} + checkSender.SetCheckCustomTags(customTags) + assert.Len(t, checkSender.checkTags, 2) + + // only tags coming from the configuration file + checkSender.sendMetricSample("metric.test", 42.0, "testhostname", nil, metrics.CounterType) + sms = <-senderMetricSampleChan + assert.Equal(t, customTags, sms.metricSample.Tags) + + // tags added by the check + tags coming from the configuration file + checkSender.sendMetricSample("metric.test", 42.0, "testhostname", checkTags, metrics.CounterType) + sms = 
<-senderMetricSampleChan + assert.Equal(t, append(checkTags, customTags...), sms.metricSample.Tags) +} + +func TestGetSenderAddCheckCustomTagsService(t *testing.T) { + resetAggregator() + InitAggregator(nil, "testhostname", "") + + senderMetricSampleChan := make(chan senderMetricSample, 10) + serviceCheckChan := make(chan metrics.ServiceCheck, 10) + eventChan := make(chan metrics.Event, 10) + checkSender := newCheckSender(checkID1, "", senderMetricSampleChan, serviceCheckChan, eventChan) + + // no custom tags + checkSender.ServiceCheck("test", metrics.ServiceCheckOK, "testhostname", nil, "test message") + sc := <-serviceCheckChan + assert.Nil(t, sc.Tags) + + // only tags added by the check + checkTags := []string{"check:tag1", "check:tag2"} + checkSender.ServiceCheck("test", metrics.ServiceCheckOK, "testhostname", checkTags, "test message") + sc = <-serviceCheckChan + assert.Equal(t, checkTags, sc.Tags) + + // simulate tags in the configuration file + customTags := []string{"custom:tag1", "custom:tag2"} + checkSender.SetCheckCustomTags(customTags) + assert.Len(t, checkSender.checkTags, 2) + + // only tags coming from the configuration file + checkSender.ServiceCheck("test", metrics.ServiceCheckOK, "testhostname", nil, "test message") + sc = <-serviceCheckChan + assert.Equal(t, customTags, sc.Tags) + + // tags added by the check + tags coming from the configuration file + checkSender.ServiceCheck("test", metrics.ServiceCheckOK, "testhostname", checkTags, "test message") + sc = <-serviceCheckChan + assert.Equal(t, append(checkTags, customTags...), sc.Tags) +} + +func TestGetSenderAddCheckCustomTagsEvent(t *testing.T) { + resetAggregator() + InitAggregator(nil, "testhostname", "") + + senderMetricSampleChan := make(chan senderMetricSample, 10) + serviceCheckChan := make(chan metrics.ServiceCheck, 10) + eventChan := make(chan metrics.Event, 10) + checkSender := newCheckSender(checkID1, "", senderMetricSampleChan, serviceCheckChan, eventChan) + + event := metrics.Event{ 
+ Title: "title", + Host: "testhostname", + Ts: time.Now().Unix(), + Text: "text", + Tags: nil, + } + + // no custom tags + checkSender.Event(event) + e := <-eventChan + assert.Nil(t, e.Tags) + + // only tags added by the check + checkTags := []string{"check:tag1", "check:tag2"} + event.Tags = checkTags + checkSender.Event(event) + e = <-eventChan + assert.Equal(t, checkTags, e.Tags) + + // simulate tags in the configuration file + customTags := []string{"custom:tag1", "custom:tag2"} + checkSender.SetCheckCustomTags(customTags) + assert.Len(t, checkSender.checkTags, 2) + + // only tags coming from the configuration file + event.Tags = nil + checkSender.Event(event) + e = <-eventChan + assert.Equal(t, customTags, e.Tags) + + // tags added by the check + tags coming from the configuration file + event.Tags = checkTags + checkSender.Event(event) + e = <-eventChan + assert.Equal(t, append(checkTags, customTags...), e.Tags) +} + func TestCheckSenderInterface(t *testing.T) { senderMetricSampleChan := make(chan senderMetricSample, 10) serviceCheckChan := make(chan metrics.ServiceCheck, 10) diff --git a/pkg/autodiscovery/integration/config.go b/pkg/autodiscovery/integration/config.go index 2c76e143182b2..5b4dce1b2b309 100644 --- a/pkg/autodiscovery/integration/config.go +++ b/pkg/autodiscovery/integration/config.go @@ -52,10 +52,11 @@ type Config struct { // CommonInstanceConfig holds the reserved fields for the yaml instance data type CommonInstanceConfig struct { - MinCollectionInterval int `yaml:"min_collection_interval"` - EmptyDefaultHostname bool `yaml:"empty_default_hostname"` - Name string `yaml:"name"` - Namespace string `yaml:"namespace"` + MinCollectionInterval int `yaml:"min_collection_interval"` + EmptyDefaultHostname bool `yaml:"empty_default_hostname"` + Tags []string `yaml:"tags"` + Name string `yaml:"name"` + Namespace string `yaml:"namespace"` } // Equal determines whether the passed config is the same diff --git a/pkg/collector/corechecks/checkbase.go 
b/pkg/collector/corechecks/checkbase.go index 15474ecd60246..77ef5f8fc8395 100644 --- a/pkg/collector/corechecks/checkbase.go +++ b/pkg/collector/corechecks/checkbase.go @@ -32,6 +32,9 @@ import ( // Integration warnings are handled via the Warn and Warnf methods // that forward the warning to the logger and send the warning to // the collector for display in the status page and the web UI. +// +// If custom tags are set in the instance configuration, they will +// be automatically appended to each send done by this check. type CheckBase struct { checkName string checkID check.ID @@ -84,6 +87,17 @@ func (c *CheckBase) CommonConfigure(instance integration.Data) error { } s.DisableDefaultHostname(true) } + + // Set custom tags configured for this check + if len(commonOptions.Tags) > 0 { + s, err := aggregator.GetSender(c.checkID) + if err != nil { + log.Errorf("failed to retrieve a sender for check %s: %s", string(c.ID()), err) + return err + } + s.SetCheckCustomTags(commonOptions.Tags) + } + return nil } diff --git a/pkg/collector/corechecks/cluster/kubernetes_apiserver.go b/pkg/collector/corechecks/cluster/kubernetes_apiserver.go index 42741a29a1b94..33467a8720c29 100644 --- a/pkg/collector/corechecks/cluster/kubernetes_apiserver.go +++ b/pkg/collector/corechecks/cluster/kubernetes_apiserver.go @@ -37,7 +37,6 @@ const ( // KubeASConfig is the config of the API server. 
type KubeASConfig struct { - Tags []string `yaml:"tags"` CollectEvent bool `yaml:"collect_events"` CollectOShiftQuotas bool `yaml:"collect_openshift_clusterquotas"` FilteredEventType []string `yaml:"filtered_event_types"` @@ -279,7 +278,7 @@ func (k *KubeASCheck) parseComponentStatus(sender aggregator.Sender, componentsS log.Debug("API Server component's structure is not expected") continue } - tagComp := append(k.instance.Tags, fmt.Sprintf("component:%s", component.Name)) + tagComp := []string{fmt.Sprintf("component:%s", component.Name)} for _, condition := range component.Conditions { status_check := metrics.ServiceCheckUnknown @@ -341,7 +340,6 @@ ITER_EVENTS: k.Warnf("Error while formatting bundled events, %s. Not submitting", err.Error()) continue } - datadogEv.Tags = append(datadogEv.Tags, k.instance.Tags...) sender.Event(datadogEv) } return nil diff --git a/pkg/collector/corechecks/cluster/kubernetes_apiserver_test.go b/pkg/collector/corechecks/cluster/kubernetes_apiserver_test.go index f273f7a7c3919..797e8b75fb29d 100644 --- a/pkg/collector/corechecks/cluster/kubernetes_apiserver_test.go +++ b/pkg/collector/corechecks/cluster/kubernetes_apiserver_test.go @@ -89,33 +89,31 @@ func TestParseComponentStatus(t *testing.T) { // FIXME: use the factory instead kubeASCheck := &KubeASCheck{ - instance: &KubeASConfig{ - Tags: []string{"test"}, - }, + instance: &KubeASConfig{}, CheckBase: core.NewCheckBase(kubernetesAPIServerCheckName), KubeAPIServerHostname: "hostname", } mocked := mocksender.NewMockSender(kubeASCheck.ID()) - mocked.On("ServiceCheck", "kube_apiserver_controlplane.up", metrics.ServiceCheckOK, "hostname", []string{"test", "component:Zookeeper"}, "") + mocked.On("ServiceCheck", "kube_apiserver_controlplane.up", metrics.ServiceCheckOK, "hostname", []string{"component:Zookeeper"}, "") kubeASCheck.parseComponentStatus(mocked, expected) mocked.AssertNumberOfCalls(t, "ServiceCheck", 1) - mocked.AssertServiceCheck(t, "kube_apiserver_controlplane.up", 
metrics.ServiceCheckOK, "hostname", []string{"test", "component:Zookeeper"}, "") + mocked.AssertServiceCheck(t, "kube_apiserver_controlplane.up", metrics.ServiceCheckOK, "hostname", []string{"component:Zookeeper"}, "") err := kubeASCheck.parseComponentStatus(mocked, unExpected) assert.EqualError(t, err, "metadata structure has changed. Not collecting API Server's Components status") mocked.AssertNotCalled(t, "ServiceCheck", "kube_apiserver_controlplane.up") - mocked.On("ServiceCheck", "kube_apiserver_controlplane.up", metrics.ServiceCheckCritical, "hostname", []string{"test", "component:ETCD"}, "") + mocked.On("ServiceCheck", "kube_apiserver_controlplane.up", metrics.ServiceCheckCritical, "hostname", []string{"component:ETCD"}, "") kubeASCheck.parseComponentStatus(mocked, unHealthy) mocked.AssertNumberOfCalls(t, "ServiceCheck", 2) - mocked.AssertServiceCheck(t, "kube_apiserver_controlplane.up", metrics.ServiceCheckCritical, "hostname", []string{"test", "component:ETCD"}, "") + mocked.AssertServiceCheck(t, "kube_apiserver_controlplane.up", metrics.ServiceCheckCritical, "hostname", []string{"component:ETCD"}, "") - mocked.On("ServiceCheck", "kube_apiserver_controlplane.up", metrics.ServiceCheckUnknown, "hostname", []string{"test", "component:DCA"}, "") + mocked.On("ServiceCheck", "kube_apiserver_controlplane.up", metrics.ServiceCheckUnknown, "hostname", []string{"component:DCA"}, "") kubeASCheck.parseComponentStatus(mocked, unknown) mocked.AssertNumberOfCalls(t, "ServiceCheck", 3) - mocked.AssertServiceCheck(t, "kube_apiserver_controlplane.up", metrics.ServiceCheckUnknown, "hostname", []string{"test", "component:DCA"}, "") + mocked.AssertServiceCheck(t, "kube_apiserver_controlplane.up", metrics.ServiceCheckUnknown, "hostname", []string{"component:DCA"}, "") empty_resp := kubeASCheck.parseComponentStatus(mocked, empty) assert.Nil(t, empty_resp, "metadata structure has changed. 
Not collecting API Server's Components status") @@ -159,7 +157,6 @@ func TestProcessBundledEvents(t *testing.T) { kubeASCheck := &KubeASCheck{ instance: &KubeASConfig{ - Tags: []string{"test"}, FilteredEventType: []string{"ignored"}, }, CheckBase: core.NewCheckBase(kubernetesAPIServerCheckName), @@ -195,7 +192,7 @@ func TestProcessBundledEvents(t *testing.T) { Title: "Events from the machine-blue Node", Text: "%%% \n30 **MissingClusterDNS**: MountVolume.SetUp succeeded\n \n _Events emitted by the kubelet seen at " + time.Unix(709675200, 0).String() + "_ \n\n %%%", Priority: "normal", - Tags: []string{"test", "namespace:default", "source_component:kubelet"}, + Tags: []string{"namespace:default", "source_component:kubelet"}, AggregationKey: "kubernetes_apiserver:e63e74fa-f566-11e7-9749-0e4863e1cbf4", SourceTypeName: "kubernetes", Ts: 709675200, @@ -223,7 +220,7 @@ func TestProcessBundledEvents(t *testing.T) { Title: "Events from the machine-blue Node", Text: "%%% \n30 **MissingClusterDNS**: MountVolume.SetUp succeeded\n \n _Events emitted by the kubelet seen at " + time.Unix(709675200, 0).String() + "_ \n\n %%%", Priority: "normal", - Tags: []string{"test", "namespace:default", "source_component:kubelet"}, + Tags: []string{"namespace:default", "source_component:kubelet"}, AggregationKey: "kubernetes_apiserver:e63e74fa-f566-11e7-9749-0e4863e1cbf4", SourceTypeName: "kubernetes", Ts: 709675200, @@ -248,7 +245,6 @@ func TestProcessEvent(t *testing.T) { kubeASCheck := &KubeASCheck{ instance: &KubeASConfig{ - Tags: []string{"test"}, FilteredEventType: []string{"ignored"}, }, CheckBase: core.NewCheckBase(kubernetesAPIServerCheckName), @@ -264,7 +260,7 @@ func TestProcessEvent(t *testing.T) { Title: "Events from the dca-789976f5d7-2ljx6 ReplicaSet", Text: "%%% \n2 **Scheduled**: Successfully assigned dca-789976f5d7-2ljx6 to ip-10-0-0-54\n \n _New events emitted by the default-scheduler seen at " + time.Unix(709662600000, 0).String() + "_ \n\n %%%", Priority: "normal", - 
Tags: []string{"test", "source_component:default-scheduler", "namespace:default"}, + Tags: []string{"source_component:default-scheduler", "namespace:default"}, AggregationKey: "kubernetes_apiserver:e6417a7f-f566-11e7-9749-0e4863e1cbf4", SourceTypeName: "kubernetes", Ts: 709662600, diff --git a/pkg/collector/corechecks/cluster/kubernetes_openshift.go b/pkg/collector/corechecks/cluster/kubernetes_openshift.go index decaab824e0ad..ea6949dfb9da1 100644 --- a/pkg/collector/corechecks/cluster/kubernetes_openshift.go +++ b/pkg/collector/corechecks/cluster/kubernetes_openshift.go @@ -51,7 +51,7 @@ func (k *KubeASCheck) retrieveOShiftClusterQuotas() ([]osq.ClusterResourceQuota, // reportClusterQuotas reports metrics on OpenShift ClusterResourceQuota objects func (k *KubeASCheck) reportClusterQuotas(quotas []osq.ClusterResourceQuota, sender aggregator.Sender) { for _, quota := range quotas { - quotaTags := append(k.instance.Tags, fmt.Sprintf("clusterquota:%s", quota.Name)) + quotaTags := []string{fmt.Sprintf("clusterquota:%s", quota.Name)} remaining := computeQuotaRemaining(quota.Status.Total.Used, quota.Status.Total.Hard) k.reportQuota(quota.Status.Total.Hard, "openshift.clusterquota", "limit", quotaTags, sender) diff --git a/pkg/collector/corechecks/cluster/kubernetes_openshift_test.go b/pkg/collector/corechecks/cluster/kubernetes_openshift_test.go index a13b729254129..6bfa41bf1d254 100644 --- a/pkg/collector/corechecks/cluster/kubernetes_openshift_test.go +++ b/pkg/collector/corechecks/cluster/kubernetes_openshift_test.go @@ -26,7 +26,7 @@ func TestReportClusterQuotas(t *testing.T) { json.Unmarshal(raw, &list) require.Len(t, list.Items, 1) - var instanceCfg = []byte("tags: [customtag]") + var instanceCfg = []byte("") var initCfg = []byte("") kubeASCheck := KubernetesASFactory().(*KubeASCheck) err = kubeASCheck.Configure(instanceCfg, initCfg) @@ -38,7 +38,7 @@ func TestReportClusterQuotas(t *testing.T) { mocked.AssertNumberOfCalls(t, "Gauge", 9*3) // Total - expectedTags 
:= []string{"customtag", "clusterquota:multiproj-test"} + expectedTags := []string{"clusterquota:multiproj-test"} mocked.AssertMetric(t, "Gauge", "openshift.clusterquota.cpu.limit", 3.0, "", expectedTags) mocked.AssertMetric(t, "Gauge", "openshift.clusterquota.cpu.used", 0.6, "", expectedTags) diff --git a/pkg/collector/corechecks/containers/containerd.go b/pkg/collector/corechecks/containers/containerd.go index 66c18e0a0c3a7..cf82f4b3a173c 100644 --- a/pkg/collector/corechecks/containers/containerd.go +++ b/pkg/collector/corechecks/containers/containerd.go @@ -18,7 +18,7 @@ import ( "github.com/containerd/containerd/namespaces" "github.com/containerd/typeurl" "github.com/gogo/protobuf/types" - "gopkg.in/yaml.v2" + yaml "gopkg.in/yaml.v2" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/autodiscovery/integration" @@ -47,7 +47,6 @@ type ContainerdCheck struct { // ContainerdConfig contains the custom options and configurations set by the user. type ContainerdConfig struct { - Tags []string `yaml:"tags"` ContainerdFilters []string `yaml:"filters"` CollectEvents bool `yaml:"collect_events"` } @@ -104,11 +103,11 @@ func (c *ContainerdCheck) Run() error { // As we do not rely on a singleton, we ensure connectivity every check run. 
cu, errHealth := cutil.GetContainerdUtil() if errHealth != nil { - sender.ServiceCheck("containerd.health", metrics.ServiceCheckCritical, "", c.instance.Tags, fmt.Sprintf("Connectivity error %v", errHealth)) + sender.ServiceCheck("containerd.health", metrics.ServiceCheckCritical, "", nil, fmt.Sprintf("Connectivity error %v", errHealth)) log.Infof("Error ensuring connectivity with Containerd daemon %v", errHealth) return errHealth } - sender.ServiceCheck("containerd.health", metrics.ServiceCheckOK, "", c.instance.Tags, "") + sender.ServiceCheck("containerd.health", metrics.ServiceCheckOK, "", nil, "") ns := cu.Namespace() if c.instance.CollectEvents { @@ -122,17 +121,17 @@ func (c *ContainerdCheck) Run() error { } events := c.sub.Flush(time.Now().Unix()) // Process events - computeEvents(events, sender, c.instance.Tags, c.filters) + computeEvents(events, sender, c.filters) } nk := namespaces.WithNamespace(context.Background(), ns) - computeMetrics(sender, nk, cu, c.instance.Tags, c.filters) + computeMetrics(sender, nk, cu, c.filters) return nil } // compute events converts Containerd events into Datadog events -func computeEvents(events []containerdEvent, sender aggregator.Sender, userTags []string, fil *containers.Filter) { +func computeEvents(events []containerdEvent, sender aggregator.Sender, fil *containers.Filter) { for _, e := range events { split := strings.Split(e.Topic, "/") if len(split) != 3 { @@ -157,7 +156,6 @@ func computeEvents(events []containerdEvent, sender aggregator.Sender, userTags output.Tags = append(output.Tags, fmt.Sprintf("%s:%s", k, v)) } } - output.Tags = append(output.Tags, userTags...) 
output.Ts = e.Timestamp.Unix() output.Title = fmt.Sprintf("Event on %s from Containerd", split[1]) if split[1] == "containers" || split[1] == "tasks" { @@ -173,7 +171,7 @@ func computeEvents(events []containerdEvent, sender aggregator.Sender, userTags } } -func computeMetrics(sender aggregator.Sender, nk context.Context, cu cutil.ContainerdItf, userTags []string, fil *containers.Filter) { +func computeMetrics(sender aggregator.Sender, nk context.Context, cu cutil.ContainerdItf, fil *containers.Filter) { containers, err := cu.Containers() if err != nil { log.Errorf(err.Error()) @@ -194,7 +192,6 @@ func computeMetrics(sender aggregator.Sender, nk context.Context, cu cutil.Conta if err != nil { log.Errorf("Could not collect tags for container %s: %s", ctn.ID()[:12], err) } - tags = append(tags, userTags...) // Tagger tags taggerTags, err := tagger.Tag(ctn.ID(), collectors.HighCardinality) if err != nil { diff --git a/pkg/collector/corechecks/containers/containerd_test.go b/pkg/collector/corechecks/containers/containerd_test.go index e2fa0d2a0db68..f70e022e51efc 100644 --- a/pkg/collector/corechecks/containers/containerd_test.go +++ b/pkg/collector/corechecks/containers/containerd_test.go @@ -140,9 +140,7 @@ func TestCollectTags(t *testing.T) { // TestComputeEvents checks the conversion of Containerd events to Datadog events func TestComputeEvents(t *testing.T) { containerdCheck := &ContainerdCheck{ - instance: &ContainerdConfig{ - Tags: []string{"test"}, - }, + instance: &ContainerdConfig{}, CheckBase: corechecks.NewCheckBase("containerd"), } mocked := mocksender.NewMockSender(containerdCheck.ID()) @@ -187,7 +185,7 @@ func TestComputeEvents(t *testing.T) { }, }, expectedTitle: "Event on containers from Containerd", - expectedTags: []string{"foo:bar", "test"}, + expectedTags: []string{"foo:bar"}, numberEvents: 1, }, { @@ -201,7 +199,7 @@ func TestComputeEvents(t *testing.T) { }, }, expectedTitle: "Event on images from Containerd", - expectedTags: []string{"foo:baz", 
"test"}, + expectedTags: []string{"foo:baz"}, numberEvents: 1, }, { @@ -215,13 +213,13 @@ func TestComputeEvents(t *testing.T) { }, }, expectedTitle: "Event on images from Containerd", - expectedTags: []string{}, + expectedTags: nil, numberEvents: 0, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - computeEvents(test.events, mocked, containerdCheck.instance.Tags, containerdCheck.filters) + computeEvents(test.events, mocked, containerdCheck.filters) mocked.On("Event", mock.AnythingOfType("metrics.Event")) if len(mocked.Calls) > 0 { res := (mocked.Calls[0].Arguments.Get(0)).(metrics.Event) diff --git a/pkg/collector/corechecks/containers/cri.go b/pkg/collector/corechecks/containers/cri.go index f7056e4b888fc..7db8990939e4f 100644 --- a/pkg/collector/corechecks/containers/cri.go +++ b/pkg/collector/corechecks/containers/cri.go @@ -104,7 +104,6 @@ func (c *CRICheck) processContainerStats(sender aggregator.Sender, runtime strin log.Errorf("Could not collect tags for container %s: %s", cid[:12], err) } tags = append(tags, "runtime:"+runtime) - tags = append(tags, c.instance.Tags...) sender.Gauge("cri.mem.rss", float64(stats.GetMemory().GetWorkingSetBytes().GetValue()), "", tags) // Cumulative CPU usage (sum across all cores) since object creation. 
sender.Rate("cri.cpu.usage", float64(stats.GetCpu().GetUsageCoreNanoSeconds().GetValue()), "", tags) diff --git a/pkg/collector/corechecks/containers/docker.go b/pkg/collector/corechecks/containers/docker.go index ec2e87fbc906f..9a83b958584d3 100644 --- a/pkg/collector/corechecks/containers/docker.go +++ b/pkg/collector/corechecks/containers/docker.go @@ -44,7 +44,7 @@ type DockerConfig struct { CollectImageSize bool `yaml:"collect_image_size"` CollectDiskStats bool `yaml:"collect_disk_stats"` CollectVolumeCount bool `yaml:"collect_volume_count"` - Tags []string `yaml:"tags"` + Tags []string `yaml:"tags"` // Used only by the configuration converter v5 → v6 CollectEvent bool `yaml:"collect_events"` FilteredEventType []string `yaml:"filtered_event_types"` CappedMetrics map[string]float64 `yaml:"capped_metrics"` @@ -141,14 +141,14 @@ func (d *DockerCheck) countAndWeightImages(sender aggregator.Sender, du *docker. log.Errorf("Could not parse image name and tag, RepoTag is: %s", i.RepoTags[0]) continue } - tags := append(d.instance.Tags, fmt.Sprintf("image_name:%s", name), fmt.Sprintf("image_tag:%s", tag)) + tags := []string{fmt.Sprintf("image_name:%s", name), fmt.Sprintf("image_tag:%s", tag)} sender.Gauge("docker.image.virtual_size", float64(i.VirtualSize), "", tags) sender.Gauge("docker.image.size", float64(i.Size), "", tags) } } - sender.Gauge("docker.images.available", float64(len(availableImages)), "", d.instance.Tags) - sender.Gauge("docker.images.intermediate", float64(len(allImages)-len(availableImages)), "", d.instance.Tags) + sender.Gauge("docker.images.available", float64(len(availableImages)), "", nil) + sender.Gauge("docker.images.intermediate", float64(len(allImages)-len(availableImages)), "", nil) return nil } @@ -161,13 +161,13 @@ func (d *DockerCheck) Run() error { du, err := docker.GetDockerUtil() if err != nil { - sender.ServiceCheck(DockerServiceUp, metrics.ServiceCheckCritical, "", d.instance.Tags, err.Error()) + sender.ServiceCheck(DockerServiceUp, 
metrics.ServiceCheckCritical, "", nil, err.Error()) d.Warnf("Error initialising check: %s", err) return err } cList, err := du.ListContainers(&docker.ContainerListConfig{IncludeExited: true, FlagExcluded: true}) if err != nil { - sender.ServiceCheck(DockerServiceUp, metrics.ServiceCheckCritical, "", d.instance.Tags, err.Error()) + sender.ServiceCheck(DockerServiceUp, metrics.ServiceCheckCritical, "", nil, err.Error()) d.Warnf("Error collecting containers: %s", err) return err } @@ -184,7 +184,6 @@ func (d *DockerCheck) Run() error { if err != nil { log.Errorf("Could not collect tags for container %s: %s", c.ID[:12], err) } - tags = append(tags, d.instance.Tags...) if c.CPU != nil { sender.Rate("docker.cpu.system", float64(c.CPU.System), "", tags) @@ -265,21 +264,21 @@ func (d *DockerCheck) Run() error { var totalRunning, totalStopped int64 for _, image := range images { - sender.Gauge("docker.containers.running", float64(image.running), "", append(d.instance.Tags, image.tags...)) + sender.Gauge("docker.containers.running", float64(image.running), "", image.tags) totalRunning += image.running - sender.Gauge("docker.containers.stopped", float64(image.stopped), "", append(d.instance.Tags, image.tags...)) + sender.Gauge("docker.containers.stopped", float64(image.stopped), "", image.tags) totalStopped += image.stopped } - sender.Gauge("docker.containers.running.total", float64(totalRunning), "", d.instance.Tags) - sender.Gauge("docker.containers.stopped.total", float64(totalStopped), "", d.instance.Tags) + sender.Gauge("docker.containers.running.total", float64(totalRunning), "", nil) + sender.Gauge("docker.containers.stopped.total", float64(totalStopped), "", nil) if err := d.countAndWeightImages(sender, du); err != nil { log.Error(err.Error()) - sender.ServiceCheck(DockerServiceUp, metrics.ServiceCheckCritical, "", d.instance.Tags, err.Error()) + sender.ServiceCheck(DockerServiceUp, metrics.ServiceCheckCritical, "", nil, err.Error()) d.Warnf("Error collecting images: 
%s", err) return err } - sender.ServiceCheck(DockerServiceUp, metrics.ServiceCheckOK, "", d.instance.Tags, "") + sender.ServiceCheck(DockerServiceUp, metrics.ServiceCheckOK, "", nil, "") if d.instance.CollectEvent || d.instance.CollectExitCodes { events, err := d.retrieveEvents(du) @@ -312,17 +311,17 @@ func (d *DockerCheck) Run() error { continue } if stat.Free != nil { - sender.Gauge(fmt.Sprintf("docker.%s.free", stat.Name), float64(*stat.Free), "", d.instance.Tags) + sender.Gauge(fmt.Sprintf("docker.%s.free", stat.Name), float64(*stat.Free), "", nil) } if stat.Used != nil { - sender.Gauge(fmt.Sprintf("docker.%s.used", stat.Name), float64(*stat.Used), "", d.instance.Tags) + sender.Gauge(fmt.Sprintf("docker.%s.used", stat.Name), float64(*stat.Used), "", nil) } if stat.Total != nil { - sender.Gauge(fmt.Sprintf("docker.%s.total", stat.Name), float64(*stat.Total), "", d.instance.Tags) + sender.Gauge(fmt.Sprintf("docker.%s.total", stat.Name), float64(*stat.Total), "", nil) } percent := stat.GetPercentUsed() if !math.IsNaN(percent) { - sender.Gauge(fmt.Sprintf("docker.%s.percent", stat.Name), percent, "", d.instance.Tags) + sender.Gauge(fmt.Sprintf("docker.%s.percent", stat.Name), percent, "", nil) } } } @@ -333,8 +332,8 @@ func (d *DockerCheck) Run() error { if err != nil { d.Warnf("Error collecting volume stats: %s", err) } else { - sender.Gauge("docker.volume.count", float64(attached), "", append(d.instance.Tags, "volume_state:attached")) - sender.Gauge("docker.volume.count", float64(dangling), "", append(d.instance.Tags, "volume_state:dangling")) + sender.Gauge("docker.volume.count", float64(attached), "", []string{"volume_state:attached"}) + sender.Gauge("docker.volume.count", float64(dangling), "", []string{"volume_state:dangling"}) } } diff --git a/pkg/collector/corechecks/containers/docker_events.go b/pkg/collector/corechecks/containers/docker_events.go index 30d3b57a9f241..f048f1a558d49 100644 --- a/pkg/collector/corechecks/containers/docker_events.go +++ 
b/pkg/collector/corechecks/containers/docker_events.go @@ -64,7 +64,6 @@ func (d *DockerCheck) reportExitCodes(events []*docker.ContainerEvent, sender ag status = metrics.ServiceCheckCritical } tags, err := tagger.Tag(ev.ContainerEntityName(), collectors.HighCardinality) - tags = append(tags, d.instance.Tags...) if err != nil { log.Debugf("no tags for %s: %s", ev.ContainerID, err) } @@ -83,7 +82,6 @@ func (d *DockerCheck) reportEvents(events []*docker.ContainerEvent, sender aggre if err != nil { log.Warnf("can't submit event: %s", err) } else { - ev.Tags = append(ev.Tags, d.instance.Tags...) sender.Event(ev) } } diff --git a/pkg/collector/corechecks/net/network.go b/pkg/collector/corechecks/net/network.go index 2c825ef4ddd4f..7b3fba71044d7 100644 --- a/pkg/collector/corechecks/net/network.go +++ b/pkg/collector/corechecks/net/network.go @@ -77,7 +77,6 @@ type NetworkCheck struct { } type networkInstanceConfig struct { - CustomTags []string `yaml:"tags"` CollectConnectionState bool `yaml:"collect_connection_state"` ExcludedInterfaces []string `yaml:"excluded_interfaces"` ExcludedInterfaceRe string `yaml:"excluded_interface_re"` @@ -129,7 +128,7 @@ func (c *NetworkCheck) Run() error { } for _, interfaceIO := range ioByInterface { if !c.isDeviceExcluded(interfaceIO.Name) { - submitInterfaceMetrics(sender, interfaceIO, c.config.instance.CustomTags) + submitInterfaceMetrics(sender, interfaceIO) } } @@ -150,7 +149,7 @@ func (c *NetworkCheck) Run() error { } } } - submitProtocolMetrics(sender, protocolStats, c.config.instance.CustomTags) + submitProtocolMetrics(sender, protocolStats) } if c.config.instance.CollectConnectionState { @@ -158,25 +157,25 @@ func (c *NetworkCheck) Run() error { if err != nil { return err } - submitConnectionsMetrics(sender, "udp4", udpStateMetricsSuffixMapping, connectionsStats, c.config.instance.CustomTags) + submitConnectionsMetrics(sender, "udp4", udpStateMetricsSuffixMapping, connectionsStats) connectionsStats, err = 
c.net.Connections("udp6") if err != nil { return err } - submitConnectionsMetrics(sender, "udp6", udpStateMetricsSuffixMapping, connectionsStats, c.config.instance.CustomTags) + submitConnectionsMetrics(sender, "udp6", udpStateMetricsSuffixMapping, connectionsStats) connectionsStats, err = c.net.Connections("tcp4") if err != nil { return err } - submitConnectionsMetrics(sender, "tcp4", tcpStateMetricsSuffixMapping, connectionsStats, c.config.instance.CustomTags) + submitConnectionsMetrics(sender, "tcp4", tcpStateMetricsSuffixMapping, connectionsStats) connectionsStats, err = c.net.Connections("tcp6") if err != nil { return err } - submitConnectionsMetrics(sender, "tcp6", tcpStateMetricsSuffixMapping, connectionsStats, c.config.instance.CustomTags) + submitConnectionsMetrics(sender, "tcp6", tcpStateMetricsSuffixMapping, connectionsStats) } sender.Commit() @@ -195,8 +194,8 @@ func (c *NetworkCheck) isDeviceExcluded(deviceName string) bool { return false } -func submitInterfaceMetrics(sender aggregator.Sender, interfaceIO net.IOCountersStat, additionalTags []string) { - tags := append(additionalTags, fmt.Sprintf("device:%s", interfaceIO.Name)) +func submitInterfaceMetrics(sender aggregator.Sender, interfaceIO net.IOCountersStat) { + tags := []string{fmt.Sprintf("device:%s", interfaceIO.Name)} sender.Rate("system.net.bytes_rcvd", float64(interfaceIO.BytesRecv), "", tags) sender.Rate("system.net.bytes_sent", float64(interfaceIO.BytesSent), "", tags) sender.Rate("system.net.packets_in.count", float64(interfaceIO.PacketsRecv), "", tags) @@ -205,19 +204,18 @@ func submitInterfaceMetrics(sender aggregator.Sender, interfaceIO net.IOCounters sender.Rate("system.net.packets_out.error", float64(interfaceIO.Errout), "", tags) } -func submitProtocolMetrics(sender aggregator.Sender, protocolStats net.ProtoCountersStat, additionalTags []string) { +func submitProtocolMetrics(sender aggregator.Sender, protocolStats net.ProtoCountersStat) { if protocolMapping, ok := 
protocolsMetricsMapping[protocolStats.Protocol]; ok { for rawMetricName, metricName := range protocolMapping { if metricValue, ok := protocolStats.Stats[rawMetricName]; ok { - sender.Rate(metricName, float64(metricValue), "", additionalTags) - sender.MonotonicCount(fmt.Sprintf("%s.count", metricName), float64(metricValue), "", - additionalTags) + sender.Rate(metricName, float64(metricValue), "", nil) + sender.MonotonicCount(fmt.Sprintf("%s.count", metricName), float64(metricValue), "", nil) } } } } -func submitConnectionsMetrics(sender aggregator.Sender, protocolName string, stateMetricSuffixMapping map[string]string, connectionsStats []net.ConnectionStat, additionalTags []string) { +func submitConnectionsMetrics(sender aggregator.Sender, protocolName string, stateMetricSuffixMapping map[string]string, connectionsStats []net.ConnectionStat) { metricCount := map[string]float64{} for _, suffix := range stateMetricSuffixMapping { metricCount[suffix] = 0 @@ -228,8 +226,7 @@ func submitConnectionsMetrics(sender aggregator.Sender, protocolName string, sta } for suffix, count := range metricCount { - sender.Gauge(fmt.Sprintf("system.net.%s.%s", protocolName, suffix), - count, "", additionalTags) + sender.Gauge(fmt.Sprintf("system.net.%s.%s", protocolName, suffix), count, "", nil) } } diff --git a/pkg/collector/corechecks/net/network_test.go b/pkg/collector/corechecks/net/network_test.go index 8054ed875b8d8..91a3b5a792e58 100644 --- a/pkg/collector/corechecks/net/network_test.go +++ b/pkg/collector/corechecks/net/network_test.go @@ -79,10 +79,6 @@ excluded_interfaces: - eth0 - lo0 excluded_interface_re: "eth.*" - -tags: - - "a:test1" - - "b:test2" `) err := check.Configure(rawInstanceConfig, []byte(``)) @@ -90,7 +86,6 @@ tags: assert.Equal(t, true, check.config.instance.CollectConnectionState) assert.ElementsMatch(t, []string{"eth0", "lo0"}, check.config.instance.ExcludedInterfaces) assert.Equal(t, "eth.*", check.config.instance.ExcludedInterfaceRe) - 
assert.ElementsMatch(t, []string{"a:test1", "b:test2"}, check.config.instance.CustomTags) } func TestNetworkCheck(t *testing.T) { @@ -269,9 +264,6 @@ func TestNetworkCheck(t *testing.T) { rawInstanceConfig := []byte(` collect_connection_state: true -tags: - - "test:tag" - - "test:another_tag" `) err := networkCheck.Configure(rawInstanceConfig, []byte(``)) @@ -287,8 +279,9 @@ tags: err = networkCheck.Run() assert.Nil(t, err) - customTags := []string{"test:tag", "test:another_tag"} - eth0Tags := append(customTags, "device:eth0") + var customTags []string + + eth0Tags := []string{"device:eth0"} mockSender.AssertCalled(t, "Rate", "system.net.bytes_rcvd", float64(10), "", eth0Tags) mockSender.AssertCalled(t, "Rate", "system.net.bytes_sent", float64(11), "", eth0Tags) mockSender.AssertCalled(t, "Rate", "system.net.packets_in.count", float64(12), "", eth0Tags) @@ -296,7 +289,7 @@ tags: mockSender.AssertCalled(t, "Rate", "system.net.packets_out.count", float64(14), "", eth0Tags) mockSender.AssertCalled(t, "Rate", "system.net.packets_out.error", float64(15), "", eth0Tags) - lo0Tags := append(customTags, "device:lo0") + lo0Tags := []string{"device:lo0"} mockSender.AssertCalled(t, "Rate", "system.net.bytes_rcvd", float64(16), "", lo0Tags) mockSender.AssertCalled(t, "Rate", "system.net.bytes_sent", float64(17), "", lo0Tags) mockSender.AssertCalled(t, "Rate", "system.net.packets_in.count", float64(18), "", lo0Tags) diff --git a/pkg/collector/corechecks/net/snmp.go b/pkg/collector/corechecks/net/snmp.go index d4282e70870d7..35c4a54051080 100644 --- a/pkg/collector/corechecks/net/snmp.go +++ b/pkg/collector/corechecks/net/snmp.go @@ -85,11 +85,11 @@ type snmpInstanceCfg struct { Timeout uint `yaml:"timeout,omitempty"` Retries uint `yaml:"retries,omitempty"` Metrics []metric `yaml:"metrics,omitempty"` - Tags []string `yaml:"tags,omitempty"` OIDTranslator *util.BiMap `yaml:",omitempty"` //will not be in yaml NameLookup map[string]string `yaml:",omitempty"` //will not be in yaml 
MetricMap map[string]*metric `yaml:",omitempty"` //will not be in yaml TagMap map[string][]*metricTag `yaml:",omitempty"` //will not be in yaml + snmpDeviceTag string `yaml:",omitempty"` //will not be in yaml snmp *snmpgo.SNMP } @@ -426,7 +426,7 @@ func (c *snmpConfig) parse(data []byte, initData []byte) error { tagbuff.WriteString(":") tagbuff.WriteString(fmt.Sprintf("%d", c.instance.Port)) - c.instance.Tags = append(c.instance.Tags, tagbuff.String()) + c.instance.snmpDeviceTag = tagbuff.String() //security - make sure we're backward compatible switch c.instance.AuthProtocol { @@ -507,7 +507,7 @@ func (c *SNMPCheck) submitSNMP(oids snmpgo.Oids, vbs snmpgo.VarBinds) error { } //set tag - tagbundle := append([]string(nil), c.cfg.instance.Tags...) + tagbundle := []string{c.cfg.instance.snmpDeviceTag} for _, entry := range registry { tagbuff.Reset() if entry.Tag.Column != "" && len(entry.Varbinds) > 0 { diff --git a/pkg/collector/corechecks/net/snmp_test.go b/pkg/collector/corechecks/net/snmp_test.go index e6f6e70da9711..5f76425ad9d88 100644 --- a/pkg/collector/corechecks/net/snmp_test.go +++ b/pkg/collector/corechecks/net/snmp_test.go @@ -163,22 +163,6 @@ func TestConfigureV2(t *testing.T) { if cfg.instance.Retries != 5 { t.Fatalf("Failed retries: expected '5' got '%v'", cfg.instance.Retries) } - - tag1Found := false - tag2Found := false - - for _, tag := range cfg.instance.Tags { - if tag == "optional_tag_1" { - tag1Found = true - } - if tag == "optional_tag_2" { - tag2Found = true - } - } - - if !tag1Found || !tag2Found { - t.Fatalf("Instance tags not properly unmarshalled.") - } } func TestConfigureV3(t *testing.T) { @@ -254,8 +238,6 @@ func TestSubmitSNMP(t *testing.T) { } expectedTags := []string{ - "optional_tag_1", - "optional_tag_2", "snmp_device:localhost:161"} for _, oid := range oids { diff --git a/pkg/collector/corechecks/system/disk.go b/pkg/collector/corechecks/system/disk.go index b833a4e611564..d795ef2096570 100644 --- 
a/pkg/collector/corechecks/system/disk.go +++ b/pkg/collector/corechecks/system/disk.go @@ -32,7 +32,6 @@ type diskConfig struct { excludedMountpointRe *regexp.Regexp allPartitions bool deviceTagRe map[*regexp.Regexp][]string - customTags []string } func (c *DiskCheck) excludeDisk(mountpoint, device, fstype string) bool { @@ -147,16 +146,6 @@ func (c *DiskCheck) instanceConfigure(data integration.Data) error { } } - tags, found := conf["tags"] - if tags, ok := tags.([]interface{}); found && ok { - c.cfg.customTags = make([]string, 0, len(tags)) - for _, tag := range tags { - if tag, ok := tag.(string); ok { - c.cfg.customTags = append(c.cfg.customTags, tag) - } - } - } - return nil } @@ -179,7 +168,6 @@ func (c *DiskCheck) applyDeviceTags(device, mountpoint string, tags []string) [] for _, tag := range deviceTags { tags = append(tags, tag) } - } } return tags diff --git a/pkg/collector/corechecks/system/disk_nix.go b/pkg/collector/corechecks/system/disk_nix.go index 8f215edbed0e5..6fb577ab373e6 100644 --- a/pkg/collector/corechecks/system/disk_nix.go +++ b/pkg/collector/corechecks/system/disk_nix.go @@ -73,8 +73,7 @@ func (c *DiskCheck) collectPartitionMetrics(sender aggregator.Sender) error { continue } - tags := make([]string, len(c.cfg.customTags), len(c.cfg.customTags)+2) - copy(tags, c.cfg.customTags) + tags := make([]string, 0, 2) if c.cfg.tagByFilesystem { tags = append(tags, partition.Fstype, fmt.Sprintf("filesystem:%s", partition.Fstype)) @@ -102,9 +101,7 @@ func (c *DiskCheck) collectDiskMetrics(sender aggregator.Sender) error { } for deviceName, ioCounter := range iomap { - tags := make([]string, len(c.cfg.customTags), len(c.cfg.customTags)+1) - copy(tags, c.cfg.customTags) - tags = append(tags, fmt.Sprintf("device:%s", deviceName)) + tags := []string{fmt.Sprintf("device:%s", deviceName)} tags = c.applyDeviceTags(deviceName, "", tags) diff --git a/releasenotes/notes/corechecks-custom-tags-7b55a07c4b763735.yaml 
b/releasenotes/notes/corechecks-custom-tags-7b55a07c4b763735.yaml new file mode 100644 index 0000000000000..5f433dfd0780f --- /dev/null +++ b/releasenotes/notes/corechecks-custom-tags-7b55a07c4b763735.yaml @@ -0,0 +1,5 @@ +--- +enhancements: + - | + Each corecheck can now send custom tags using + the ``tags`` field in its configuration file. diff --git a/test/integration/corechecks/docker/basemetrics_test.go b/test/integration/corechecks/docker/basemetrics_test.go index dd03749d5d065..27a473a3eb201 100644 --- a/test/integration/corechecks/docker/basemetrics_test.go +++ b/test/integration/corechecks/docker/basemetrics_test.go @@ -17,7 +17,6 @@ func init() { func TestContainerMetricsTagging(t *testing.T) { expectedTags := []string{ - instanceTag, // Instance tags "container_name:basemetrics_redis_1", // Container name "docker_image:datadog/docker-library:redis_3_2_11-alpine", "image_name:datadog/docker-library", @@ -50,7 +49,6 @@ func TestContainerMetricsTagging(t *testing.T) { }, } pauseTags := []string{ - "instanceTag:MustBeHere", "docker_image:kubernetes/pause:latest", "image_name:kubernetes/pause", "image_tag:latest", diff --git a/test/integration/corechecks/docker/events_test.go b/test/integration/corechecks/docker/events_test.go index 8168a3f621513..1921194347fe0 100644 --- a/test/integration/corechecks/docker/events_test.go +++ b/test/integration/corechecks/docker/events_test.go @@ -22,7 +22,6 @@ func init() { func TestEvents(t *testing.T) { nowTimestamp := time.Now().Unix() expectedTags := []string{ - instanceTag, "highcardlabeltag:eventhigh", "lowcardlabeltag:eventlow", "highcardenvtag:eventhighenv", diff --git a/test/integration/corechecks/docker/exitcode_test.go b/test/integration/corechecks/docker/exitcode_test.go index 87b4bc0609456..a07f8ad7161c4 100644 --- a/test/integration/corechecks/docker/exitcode_test.go +++ b/test/integration/corechecks/docker/exitcode_test.go @@ -17,7 +17,6 @@ func TestContainerExit(t *testing.T) { expectedTags 
:= []string{ - instanceTag, "docker_image:datadog/docker-library:busybox_1_28_0", "image_name:datadog/docker-library", "short_image:docker-library", diff --git a/test/integration/corechecks/docker/globalmetrics_test.go b/test/integration/corechecks/docker/globalmetrics_test.go index ffc04ecdd3b78..6cbb016788294 100644 --- a/test/integration/corechecks/docker/globalmetrics_test.go +++ b/test/integration/corechecks/docker/globalmetrics_test.go @@ -16,7 +16,7 @@ func init() { } func TestGlobalMetrics(t *testing.T) { - expectedTags := []string{instanceTag} + expectedTags := []string{} sender.AssertCalled(t, "Gauge", "docker.images.available", mocksender.IsGreaterOrEqual(2), "", mocksender.MatchTagsContains(expectedTags)) sender.AssertCalled(t, "Gauge", "docker.images.intermediate", mocksender.IsGreaterOrEqual(0), "", mocksender.MatchTagsContains(expectedTags)) diff --git a/test/integration/corechecks/docker/main_test.go b/test/integration/corechecks/docker/main_test.go index a317b82de07df..096777f868acb 100644 --- a/test/integration/corechecks/docker/main_test.go +++ b/test/integration/corechecks/docker/main_test.go @@ -26,9 +26,6 @@ var retryDelay = flag.Int("retry-delay", 1, "time to wait between retries (defau var retryTimeout = flag.Int("retry-timeout", 30, "maximum time before failure (default 30 seconds)") var skipCleanup = flag.Bool("skip-cleanup", false, "skip cleanup of the docker containers (for debugging)") -// Must be repeated in the following dockerCfgString -const instanceTag = "instanceTag:MustBeHere" - var dockerCfgString = ` collect_events: true collect_container_size: true diff --git a/test/integration/corechecks/docker/serviceup_test.go b/test/integration/corechecks/docker/serviceup_test.go index 79a04411a0a92..1a6b11caa37e8 100644 --- a/test/integration/corechecks/docker/serviceup_test.go +++ b/test/integration/corechecks/docker/serviceup_test.go @@ -12,9 +12,7 @@ import ( ) func TestServiceUp(t *testing.T) { - expectedTags := []string{ - 
instanceTag, - } + expectedTags := []string{} sender.AssertServiceCheck(t, "docker.service_up", metrics.ServiceCheckOK, "", expectedTags, "") }