Add unit tests for Prometheus CR watcher
Signed-off-by: Matej Gera <[email protected]>
matej-g committed May 4, 2023
1 parent bb4bd44 commit 4dc5200
Showing 1 changed file with 348 additions and 0 deletions.
cmd/otel-allocator/watcher/promOperator_test.go: 348 additions & 0 deletions
@@ -0,0 +1,348 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package watcher

import (
	"context"
	"testing"
	"time"

	"github.com/go-kit/log"
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	fakemonitoringclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake"
	"github.com/prometheus-operator/prometheus-operator/pkg/informers"
	"github.com/prometheus-operator/prometheus-operator/pkg/prometheus"
	"github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	promconfig "github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery"
	kubeDiscovery "github.com/prometheus/prometheus/discovery/kubernetes"
	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func TestLoadConfig(t *testing.T) {
	tests := []struct {
		name           string
		serviceMonitor *monitoringv1.ServiceMonitor
		podMonitor     *monitoringv1.PodMonitor
		want           *promconfig.Config
		wantErr        bool
	}{
		{
			name: "simple test",
			serviceMonitor: &monitoringv1.ServiceMonitor{
				ObjectMeta: metav1.ObjectMeta{
					Name: "simple",
					Namespace: "test",
				},
				Spec: monitoringv1.ServiceMonitorSpec{
					JobLabel: "test",
					Endpoints: []monitoringv1.Endpoint{
						{
							Port: "web",
						},
					},
				},
			},
			podMonitor: &monitoringv1.PodMonitor{
				ObjectMeta: metav1.ObjectMeta{
					Name: "simple",
					Namespace: "test",
				},
				Spec: monitoringv1.PodMonitorSpec{
					JobLabel: "test",
					PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{
						{
							Port: "web",
						},
					},
				},
			},
			want: &promconfig.Config{
				ScrapeConfigs: []*promconfig.ScrapeConfig{
					{
						JobName: "serviceMonitor/test/simple/0",
						ScrapeInterval: model.Duration(30 * time.Second),
						ScrapeTimeout: model.Duration(10 * time.Second),
						HonorTimestamps: true,
						HonorLabels: false,
						Scheme: "http",
						MetricsPath: "/metrics",
						ServiceDiscoveryConfigs: []discovery.Config{
							&kubeDiscovery.SDConfig{
								Role: "endpointslice",
								NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{
									Names: []string{"test"},
									IncludeOwnNamespace: false,
								},
								HTTPClientConfig: config.DefaultHTTPClientConfig,
							},
						},
						HTTPClientConfig: config.DefaultHTTPClientConfig,
					},
					{
						JobName: "podMonitor/test/simple/0",
						ScrapeInterval: model.Duration(30 * time.Second),
						ScrapeTimeout: model.Duration(10 * time.Second),
						HonorTimestamps: true,
						HonorLabels: false,
						Scheme: "http",
						MetricsPath: "/metrics",
						ServiceDiscoveryConfigs: []discovery.Config{
							&kubeDiscovery.SDConfig{
								Role: "pod",
								NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{
									Names: []string{"test"},
									IncludeOwnNamespace: false,
								},
								HTTPClientConfig: config.DefaultHTTPClientConfig,
							},
						},
						HTTPClientConfig: config.DefaultHTTPClientConfig,
					},
				},
			},
		},
		{
			name: "basic auth (serviceMonitor)",
			serviceMonitor: &monitoringv1.ServiceMonitor{
				ObjectMeta: metav1.ObjectMeta{
					Name: "auth",
					Namespace: "test",
				},
				Spec: monitoringv1.ServiceMonitorSpec{
					JobLabel: "auth",
					Endpoints: []monitoringv1.Endpoint{
						{
							Port: "web",
							BasicAuth: &monitoringv1.BasicAuth{
								Username: v1.SecretKeySelector{
									LocalObjectReference: v1.LocalObjectReference{
										Name: "basic-auth",
									},
									Key: "username",
								},
								Password: v1.SecretKeySelector{
									LocalObjectReference: v1.LocalObjectReference{
										Name: "basic-auth",
									},
									Key: "password",
								},
							},
						},
					},
					Selector: metav1.LabelSelector{
						MatchLabels: map[string]string{
							"app": "auth",
						},
					},
				},
			},
			want: &promconfig.Config{
				GlobalConfig: promconfig.GlobalConfig{},
				ScrapeConfigs: []*promconfig.ScrapeConfig{
					{
						JobName: "serviceMonitor/test/auth/0",
						ScrapeInterval: model.Duration(30 * time.Second),
						ScrapeTimeout: model.Duration(10 * time.Second),
						HonorTimestamps: true,
						HonorLabels: false,
						Scheme: "http",
						MetricsPath: "/metrics",
						ServiceDiscoveryConfigs: []discovery.Config{
							&kubeDiscovery.SDConfig{
								Role: "endpointslice",
								NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{
									Names: []string{"test"},
									IncludeOwnNamespace: false,
								},
								HTTPClientConfig: config.DefaultHTTPClientConfig,
							},
						},
						HTTPClientConfig: config.HTTPClientConfig{
							FollowRedirects: true,
							EnableHTTP2: true,
							BasicAuth: &config.BasicAuth{
								Username: "admin",
								Password: "password",
							},
						},
					},
				},
			},
		},
		{
			name: "bearer token (podMonitor)",
			podMonitor: &monitoringv1.PodMonitor{
				ObjectMeta: metav1.ObjectMeta{
					Name: "bearer",
					Namespace: "test",
				},
				Spec: monitoringv1.PodMonitorSpec{
					JobLabel: "bearer",
					PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{
						{
							Port: "web",
							BearerTokenSecret: v1.SecretKeySelector{
								LocalObjectReference: v1.LocalObjectReference{
									Name: "bearer",
								},
								Key: "token",
							},
						},
					},
				},
			},
			want: &promconfig.Config{
				GlobalConfig: promconfig.GlobalConfig{},
				ScrapeConfigs: []*promconfig.ScrapeConfig{
					{
						JobName: "podMonitor/test/bearer/0",
						ScrapeInterval: model.Duration(30 * time.Second),
						ScrapeTimeout: model.Duration(10 * time.Second),
						HonorTimestamps: true,
						HonorLabels: false,
						Scheme: "http",
						MetricsPath: "/metrics",
						ServiceDiscoveryConfigs: []discovery.Config{
							&kubeDiscovery.SDConfig{
								Role: "pod",
								NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{
									Names: []string{"test"},
									IncludeOwnNamespace: false,
								},
								HTTPClientConfig: config.DefaultHTTPClientConfig,
							},
						},
						HTTPClientConfig: config.HTTPClientConfig{
							FollowRedirects: true,
							EnableHTTP2: true,
							Authorization: &config.Authorization{
								Type: "Bearer",
								Credentials: "bearer-token",
							},
						},
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			w := getTestPrometheuCRWatcher(t, tt.serviceMonitor, tt.podMonitor)
			for _, informer := range w.informers {
				// Start informers in order to populate cache.
				informer.Start(w.stopChannel)
			}

			// Wait for informers to sync.
			for _, informer := range w.informers {
				for !informer.HasSynced() {
					time.Sleep(50 * time.Millisecond)
				}
			}

			got, err := w.LoadConfig(context.Background())
			assert.NoError(t, err)

			sanitizeScrapeConfigsForTest(got.ScrapeConfigs)
			assert.Equal(t, tt.want.ScrapeConfigs, got.ScrapeConfigs)
		})
	}
}

// getTestPrometheuCRWatcher creates a test instance of PrometheusCRWatcher with fake clients
// and test secrets.
func getTestPrometheuCRWatcher(t *testing.T, sm *monitoringv1.ServiceMonitor, pm *monitoringv1.PodMonitor) *PrometheusCRWatcher {
	mClient := fakemonitoringclient.NewSimpleClientset()
	if sm != nil {
		_, err := mClient.MonitoringV1().ServiceMonitors("test").Create(context.Background(), sm, metav1.CreateOptions{})
		if err != nil {
			t.Fatal(err)
		}
	}
	if pm != nil {
		_, err := mClient.MonitoringV1().PodMonitors("test").Create(context.Background(), pm, metav1.CreateOptions{})
		if err != nil {
			t.Fatal(err)
		}
	}

	k8sClient := fake.NewSimpleClientset()
	_, err := k8sClient.CoreV1().Secrets("test").Create(context.Background(), &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: "basic-auth",
			Namespace: "test",
		},
		Data: map[string][]byte{"username": []byte("admin"), "password": []byte("password")},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	_, err = k8sClient.CoreV1().Secrets("test").Create(context.Background(), &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: "bearer",
			Namespace: "test",
		},
		Data: map[string][]byte{"token": []byte("bearer-token")},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}

	factory := informers.NewMonitoringInformerFactories(map[string]struct{}{v1.NamespaceAll: {}}, map[string]struct{}{}, mClient, 0, nil)
	informers, err := getInformers(factory)
	if err != nil {
		t.Fatal(err)
	}

	generator, err := prometheus.NewConfigGenerator(log.NewNopLogger(), &monitoringv1.Prometheus{}, true)
	if err != nil {
		t.Fatal(err)
	}

	return &PrometheusCRWatcher{
		kubeMonitoringClient: mClient,
		k8sClient: k8sClient,
		informers: informers,
		configGenerator: generator,
		serviceMonitorSelector: getSelector(nil),
		podMonitorSelector: getSelector(nil),
		stopChannel: make(chan struct{}),
	}
}

// sanitizeScrapeConfigsForTest removes the relabel config fields from the scrape configs,
// since these are mutated and tested further down the line with the hook(s).
func sanitizeScrapeConfigsForTest(scs []*promconfig.ScrapeConfig) {
	for _, sc := range scs {
		sc.RelabelConfigs = nil
		sc.MetricRelabelConfigs = nil
	}
}

type mockInformerLister struct{}

func (m *mockInformerLister) Informer() cache.SharedIndexInformer {
	return nil
}

func (m *mockInformerLister) Lister() cache.GenericLister {
	return nil
}
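Note: assuming a standard Go toolchain and the repository layout implied by the file path above, this new test can be exercised on its own with the usual test runner, for example:

$ go test -run TestLoadConfig -v ./cmd/otel-allocator/watcher/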
