[Metricbeat autodiscover][Provider Kubernetes] Add condition to node/namespace watchers #37181

Closed · wants to merge 6 commits · showing changes from 2 commits
1 change: 1 addition & 0 deletions CHANGELOG-developer.next.asciidoc
@@ -69,6 +69,7 @@ The list below covers the major changes between 7.0.0-rc2 and main only.

==== Bugfixes

- Stop namespace and node watchers from starting on metricbeat autodiscover if `add_resource_metadata` is disabled.{pull}37181[37181]
- Fix how Prometheus histograms are calculated when percentiles are provided.{pull}36537[36537]
- Stop using `mage:import` in community beats. This was ignoring the vendorized beats directory for some mage targets, using the code available in GOPATH, this causes inconsistencies and compilation problems if the version of the code in the GOPATH is different to the vendored one. Use of `mage:import` will continue to be unsupported in custom beats till beats is migrated to go modules, or mage supports vendored dependencies. {issue}13998[13998] {pull}14162[14162]
- Metricbeat module builders call host parser only once when instantiating light modules. {pull}20149[20149]
54 changes: 32 additions & 22 deletions libbeat/autodiscover/providers/kubernetes/pod.go
@@ -59,7 +59,7 @@
func NewPodEventer(uuid uuid.UUID, cfg *conf.C, client k8s.Interface, publish func(event []bus.Event)) (Eventer, error) {
logger := logp.NewLogger("autodiscover.pod")

var replicaSetWatcher, jobWatcher kubernetes.Watcher
var replicaSetWatcher, jobWatcher, nodeWatcher, namespaceWatcher kubernetes.Watcher

config := defaultConfig()
err := cfg.Unpack(&config)
@@ -96,40 +96,50 @@
return nil, fmt.Errorf("couldn't create watcher for %T due to error %w", &kubernetes.Pod{}, err)
}

options := kubernetes.WatchOptions{
SyncTimeout: config.SyncPeriod,
Node: config.Node,
Namespace: config.Namespace,
}

metaConf := config.AddResourceMetadata
nodeWatcher, err := kubernetes.NewNamedWatcher("node", client, &kubernetes.Node{}, options, nil)
if err != nil {
logger.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Node{}, err)

var options kubernetes.WatchOptions
if metaConf.Node.Enabled() {
options = kubernetes.WatchOptions{
SyncTimeout: config.SyncPeriod,
Node: config.Node,
Namespace: config.Namespace,
}
nodeWatcher, err = kubernetes.NewNamedWatcher("node", client, &kubernetes.Node{}, options, nil)
if err != nil {
logger.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Node{}, err)
}
}
namespaceWatcher, err := kubernetes.NewNamedWatcher("namespace", client, &kubernetes.Namespace{}, kubernetes.WatchOptions{
SyncTimeout: config.SyncPeriod,
}, nil)
if err != nil {
logger.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Namespace{}, err)
if metaConf.Namespace.Enabled() {
options = kubernetes.WatchOptions{
SyncTimeout: config.SyncPeriod,
}
namespaceWatcher, err = kubernetes.NewNamedWatcher("namespace", client, &kubernetes.Namespace{}, options, nil)
if err != nil {
logger.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Namespace{}, err)
}
}

// Resource is Pod so we need to create watchers for Replicasets and Jobs that it might belongs to
// Resource is Pod, so we need to create watchers for Replicasets and Jobs that it might belong to
// in order to be able to retrieve 2nd layer Owner metadata like in case of:
// Deployment -> Replicaset -> Pod
// CronJob -> job -> Pod
if metaConf.Deployment {
replicaSetWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_rs", client, &kubernetes.ReplicaSet{}, kubernetes.WatchOptions{
options = kubernetes.WatchOptions{
@ChrsMark (Member), Nov 23, 2023:

Minor suggestion since we are touching this code anyway: would it make sense to use the same Namespace scope setting as the one used for the Pod watcher? If we watch Pods in a specific Namespace, their parent Deployments and CronJobs will be in that same Namespace as well. (A sketch of this suggestion follows the CronJob watcher block below.)

SyncTimeout: config.SyncPeriod,
}, nil)
}
replicaSetWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_rs", client,
&kubernetes.ReplicaSet{}, options, nil)
if err != nil {
logger.Errorf("Error creating watcher for %T due to error %+v", &kubernetes.ReplicaSet{}, err)
}
}
if metaConf.CronJob {
jobWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_job", client, &kubernetes.Job{}, kubernetes.WatchOptions{
options = kubernetes.WatchOptions{
SyncTimeout: config.SyncPeriod,
}, nil)
}
jobWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_job", client, &kubernetes.Job{},
options, nil)
if err != nil {
logger.Errorf("Error creating watcher for %T due to error %+v", &kubernetes.Job{}, err)
}
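
For illustration only, a minimal sketch of the reviewer's namespace-scope suggestion above; it reuses the identifiers already in scope in NewPodEventer (config, client, logger, options, err) and is not part of this PR's changes:

// Hypothetical variant of the Deployment/CronJob watcher options: scope the
// enricher watchers to the same namespace as the Pod watcher, so parent
// ReplicaSets and Jobs are only watched where the Pods themselves live.
options = kubernetes.WatchOptions{
	SyncTimeout: config.SyncPeriod,
	Namespace:   config.Namespace, // same scope as the Pod watcher options
}
replicaSetWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_rs", client,
	&kubernetes.ReplicaSet{}, options, nil)
if err != nil {
	logger.Errorf("Error creating watcher for %T due to error %+v", &kubernetes.ReplicaSet{}, err)
}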
@@ -152,12 +162,12 @@

watcher.AddEventHandler(p)

if nodeWatcher != nil && (config.Hints.Enabled() || metaConf.Node.Enabled()) {
if nodeWatcher != nil && config.Hints.Enabled() {
updater := kubernetes.NewNodePodUpdater(p.unlockedUpdate, watcher.Store(), &p.crossUpdate)
nodeWatcher.AddEventHandler(updater)
}

if namespaceWatcher != nil && (config.Hints.Enabled() || metaConf.Namespace.Enabled()) {
if namespaceWatcher != nil && config.Hints.Enabled() {
@ChrsMark (Member), Nov 22, 2023:

Now that I see this line again, I realize that metaConf.Namespace/Node was not checked when initializing the watchers above, because these watchers are used for hints-based autodiscovery anyway, no matter what add_resource_metadata defines.

Let's take the namespaceWatcher as an example: we want to trigger the updater even if metaConf.Namespace.Enabled() != true when hints are enabled. Some details can be found at #25117.

So I believe we need to rethink this patch more carefully. I see 2 options here:

  1. We initialize the watchers if config.Hints.Enabled() || metaConf.Namespace.Enabled() anyway, so that hints keep working as expected.
  2. Introduce new settings that disable the watchers only for users who actually don't have permissions to watch Namespaces and/or Nodes.

The point is that we cannot couple hints-based autodiscovery with the add_resource_metadata setting. That's why we had this || in this if statement here.

Most probably that's also why we fetch the namespace annotations through the watcher at

namespaceAnnotations := kubernetes.PodNamespaceAnnotations(pod, p.namespaceWatcher)

A real use case would be the following:

a) the users have disabled namespace metadata enrichment with add_resource_metadata.namespace.enabled: false
b) but they want hints-based autodiscovery based on the Namespace's annotations (see https://www.elastic.co/guide/en/beats/metricbeat/current/configuration-autodiscover-hints.html#_namespace_defaults)

The hints' events won't be complete, because we won't have the namespace annotations at

kubemeta["namespace_annotations"] = namespaceAnnotations

I might be missing something here, but my point is that we need to revisit this with the big picture in mind and then decide accordingly. As we can see, there are lots of different pieces affected here.

@constanca-m (Contributor, PR author):

> We initialize the watchers if config.Hints.Enabled() || metaConf.Namespace.Enabled() anyway, so that hints keep working as expected.

I like this option, because this way we can still stop the watchers with hints.enabled: false. The main problem as it is now is that we have no way to stop them. We would have to update the Autodiscover documentation for this, as we never mention the hints option (only on that page).

> Introduce new settings that disable the watchers only for users who actually don't have permissions to watch Namespaces and/or Nodes.

Maybe the user would end up with too many options when we can already use the ones available.

> A real use case would be the following:

Thanks, it is clear 👍

@constanca-m (Contributor, PR author):

> We initialize the watchers if config.Hints.Enabled() || metaConf.Namespace.Enabled() anyway, so that hints keep working as expected.

I committed new changes, so now it works like this. (A sketch of this approach appears after the code block below.)

@ChrsMark (Member):

Also somehow related: #34717

updater := kubernetes.NewNamespacePodUpdater(p.unlockedUpdate, watcher.Store(), &p.crossUpdate)
namespaceWatcher.AddEventHandler(updater)
}
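
To make the outcome of the thread above concrete, here is a minimal sketch of option 1, assuming the same identifiers that are in scope in NewPodEventer (metaConf, config, client, logger, err); the code that landed in later commits may differ in detail:

// Option 1 from the review thread: create the watchers when either metadata
// enrichment or hints-based autodiscovery needs them, so hints keep working
// even with add_resource_metadata.node/namespace disabled.
if metaConf.Node.Enabled() || config.Hints.Enabled() {
	nodeWatcher, err = kubernetes.NewNamedWatcher("node", client, &kubernetes.Node{}, kubernetes.WatchOptions{
		SyncTimeout: config.SyncPeriod,
		Node:        config.Node,
		Namespace:   config.Namespace,
	}, nil)
	if err != nil {
		logger.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Node{}, err)
	}
}
if metaConf.Namespace.Enabled() || config.Hints.Enabled() {
	namespaceWatcher, err = kubernetes.NewNamedWatcher("namespace", client, &kubernetes.Namespace{},
		kubernetes.WatchOptions{SyncTimeout: config.SyncPeriod}, nil)
	if err != nil {
		logger.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Namespace{}, err)
	}
}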
@@ -407,7 +417,7 @@
ports = []kubernetes.ContainerPort{{ContainerPort: 0}}
}

var events []bus.Event

[CI annotation] Check failure on line 420 in libbeat/autodiscover/providers/kubernetes/pod.go (GitHub Actions / lint, windows and linux): Consider pre-allocating `events` (prealloc). A sketch of this fix follows the pod.go diff below.
portsMap := mapstr.M{}

ShouldPut(meta, "container", cmeta, p.logger)
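As a side note on the prealloc lint failures flagged above, a minimal sketch of the suggested fix, assuming one event is emitted per entry in ports as in the surrounding pod.go code (the service.go case would use len(svc.Spec.Ports) as the capacity instead):

// Pre-allocate the events slice with capacity for one event per container
// port, instead of growing it append-by-append inside the loop.
events := make([]bus.Event, 0, len(ports))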
25 changes: 15 additions & 10 deletions libbeat/autodiscover/providers/kubernetes/service.go
@@ -67,25 +67,30 @@
return nil, fmt.Errorf("couldn't create watcher for %T due to error %w", &kubernetes.Service{}, err)
}

metaConf := config.AddResourceMetadata

var namespaceMeta metadata.MetaGen
var options kubernetes.WatchOptions
var namespaceWatcher kubernetes.Watcher
if metaConf.Namespace.Enabled() {
options = kubernetes.WatchOptions{
SyncTimeout: config.SyncPeriod,
Namespace: config.Namespace,
}
namespaceWatcher, err = kubernetes.NewNamedWatcher("namespace", client, &kubernetes.Namespace{}, options, nil)
if err != nil {
return nil, fmt.Errorf("couldn't create watcher for %T due to error %w", &kubernetes.Namespace{}, err)
}

metaConf := metadata.GetDefaultResourceMetadataConfig()
namespaceWatcher, err = kubernetes.NewNamedWatcher("namespace", client, &kubernetes.Namespace{}, kubernetes.WatchOptions{
SyncTimeout: config.SyncPeriod,
Namespace: config.Namespace,
}, nil)
if err != nil {
return nil, fmt.Errorf("couldn't create watcher for %T due to error %w", &kubernetes.Namespace{}, err)
}
namespaceMeta = metadata.NewNamespaceMetadataGenerator(metaConf.Namespace, namespaceWatcher.Store(), client)

namespaceMeta = metadata.NewNamespaceMetadataGenerator(metaConf.Namespace, namespaceWatcher.Store(), client)
}

p := &service{
config: config,
uuid: uuid,
publish: publish,
metagen: metadata.NewServiceMetadataGenerator(cfg, watcher.Store(), namespaceMeta, client),
metagen: namespaceMeta,
namespaceWatcher: namespaceWatcher,
logger: logger,
watcher: watcher,
@@ -220,7 +225,7 @@
}
}

var events []bus.Event

[CI annotation] Check failure on line 228 in libbeat/autodiscover/providers/kubernetes/service.go (GitHub Actions / lint, windows and linux): Consider pre-allocating `events` (prealloc); the same pre-allocation pattern sketched after the pod.go diff applies here, using len(svc.Spec.Ports) as the capacity.
for _, port := range svc.Spec.Ports {
event := bus.Event{
"provider": s.uuid,