From c8215864ab6927f46bfd5afb99f6f6f893579346 Mon Sep 17 00:00:00 2001 From: Warren Fernandes Date: Wed, 6 Nov 2019 16:59:08 -0700 Subject: [PATCH] Add metrics to machine and cluster controller Signed-off-by: Warren Fernandes --- controllers/cluster_controller.go | 43 ++- controllers/cluster_controller_phases.go | 19 +- controllers/cluster_controller_phases_test.go | 281 +++++++++++++----- controllers/cluster_controller_test.go | 14 + controllers/machine_controller.go | 31 ++ controllers/machine_controller_noderef.go | 10 +- controllers/machine_controller_phases.go | 20 +- controllers/machine_controller_phases_test.go | 125 +++++--- go.mod | 2 + 9 files changed, 422 insertions(+), 123 deletions(-) diff --git a/controllers/cluster_controller.go b/controllers/cluster_controller.go index bf0ee6a40d02..20dbf4a980b1 100644 --- a/controllers/cluster_controller.go +++ b/controllers/cluster_controller.go @@ -25,6 +25,7 @@ import ( "github.com/go-logr/logr" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -40,6 +41,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) @@ -50,6 +52,38 @@ const ( deleteRequeueAfter = 5 * time.Second ) +var ( + clusterControlPlaneReady = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "capi_cluster_control_plane_ready", + Help: "Cluster control plane is ready if set to 1 and not if 0.", + }, + []string{"cluster", "namespace"}, + ) + clusterInfrastructureReady = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "capi_cluster_infrastructure_ready", + Help: "Cluster infrastructure is ready if set to 1 and not if 0.", + }, + []string{"cluster", "namespace"}, 
+ ) + clusterKubeconfigReady = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "capi_cluster_kubeconfig_ready", + Help: "Cluster kubeconfig is ready if set to 1 and not if 0.", + }, + []string{"cluster", "namespace"}, + ) +) + +func init() { + metrics.Registry.MustRegister( + clusterControlPlaneReady, + clusterInfrastructureReady, + clusterKubeconfigReady, + ) +} + // +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch // +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;patch // +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;watch;create;update;patch;delete @@ -338,7 +372,14 @@ func splitMachineList(list *clusterv1.MachineList) (*clusterv1.MachineList, *clu return controlplanes, nodes } -func (r *ClusterReconciler) reconcileControlPlaneInitialized(ctx context.Context, cluster *clusterv1.Cluster) error { +func (r *ClusterReconciler) reconcileControlPlaneInitialized(ctx context.Context, cluster *clusterv1.Cluster) (err error) { + defer func() { + if err != nil || !cluster.Status.ControlPlaneInitialized { + clusterControlPlaneReady.WithLabelValues(cluster.Name, cluster.Namespace).Set(0) + } else { + clusterControlPlaneReady.WithLabelValues(cluster.Name, cluster.Namespace).Set(1) + } + }() logger := r.Log.WithValues("cluster", cluster.Name, "namespace", cluster.Namespace) if cluster.Status.ControlPlaneInitialized { diff --git a/controllers/cluster_controller_phases.go b/controllers/cluster_controller_phases.go index 362a579df729..258ef103815f 100644 --- a/controllers/cluster_controller_phases.go +++ b/controllers/cluster_controller_phases.go @@ -143,7 +143,14 @@ func (r *ClusterReconciler) reconcileExternal(ctx context.Context, cluster *clus } // reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a Cluster. 
-func (r *ClusterReconciler) reconcileInfrastructure(ctx context.Context, cluster *clusterv1.Cluster) error { +func (r *ClusterReconciler) reconcileInfrastructure(ctx context.Context, cluster *clusterv1.Cluster) (err error) { + defer func() { + if err != nil || !cluster.Status.InfrastructureReady { + clusterInfrastructureReady.WithLabelValues(cluster.Name, cluster.Namespace).Set(0) + } else { + clusterInfrastructureReady.WithLabelValues(cluster.Name, cluster.Namespace).Set(1) + } + }() logger := r.Log.WithValues("cluster", cluster.Name, "namespace", cluster.Namespace) if cluster.Spec.InfrastructureRef == nil { @@ -187,7 +194,15 @@ func (r *ClusterReconciler) reconcileInfrastructure(ctx context.Context, cluster return nil } -func (r *ClusterReconciler) reconcileKubeconfig(ctx context.Context, cluster *clusterv1.Cluster) error { +func (r *ClusterReconciler) reconcileKubeconfig(ctx context.Context, cluster *clusterv1.Cluster) (rerr error) { + defer func() { + if rerr != nil || len(cluster.Status.APIEndpoints) == 0 { + clusterKubeconfigReady.WithLabelValues(cluster.Name, cluster.Namespace).Set(0) + } else { + clusterKubeconfigReady.WithLabelValues(cluster.Name, cluster.Namespace).Set(1) + } + }() + if len(cluster.Status.APIEndpoints) == 0 { return nil } diff --git a/controllers/cluster_controller_phases_test.go b/controllers/cluster_controller_phases_test.go index 5a9a6a8257f7..ac793b829acc 100644 --- a/controllers/cluster_controller_phases_test.go +++ b/controllers/cluster_controller_phases_test.go @@ -20,90 +20,225 @@ import ( "context" "testing" + . 
"github.com/onsi/gomega" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/kubernetes/scheme" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" capierrors "sigs.k8s.io/cluster-api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/metrics" ) -func TestClusterReconciler_reconcileKubeconfig(t *testing.T) { - cluster := &clusterv1.Cluster{ - ObjectMeta: v1.ObjectMeta{ - Name: "test-cluster", - }, - Status: clusterv1.ClusterStatus{ - APIEndpoints: []clusterv1.APIEndpoint{{ - Host: "1.2.3.4", - Port: 0, - }}, - }, - } - - tests := []struct { - name string - cluster *clusterv1.Cluster - secret *corev1.Secret - wantErr bool - wantRequeue bool - }{ - { - name: "cluster not provisioned, apiEndpoint is not set", - cluster: &clusterv1.Cluster{}, - wantErr: false, - }, - { - name: "kubeconfig secret found", - cluster: cluster, - secret: &corev1.Secret{ - ObjectMeta: v1.ObjectMeta{ - Name: "test-cluster-kubeconfig", +func TestClusterReconciler(t *testing.T) { + t.Run("reconcile infrastructure", func(t *testing.T) { + cluster := &clusterv1.Cluster{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-namespace", + }, + Status: clusterv1.ClusterStatus{ + APIEndpoints: []clusterv1.APIEndpoint{{ + Host: "1.2.3.4", + Port: 0, + }}, + InfrastructureReady: true, + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", + Kind: "InfrastructureConfig", + Name: "test", + }, + }, + } + + tests := []struct { + name string + cluster *clusterv1.Cluster + infraRef map[string]interface{} + expectErr bool + expectedMetric float64 + }{ + { + name: "returns no error if infrastructure ref is nil", + cluster: &clusterv1.Cluster{ObjectMeta: 
v1.ObjectMeta{Name: "test-cluster", Namespace: "test-namespace"}}, + expectErr: false, + expectedMetric: 0, + }, + { + name: "returns error if unable to reconcile infrastructure ref", + cluster: cluster, + expectErr: true, + expectedMetric: 0, + }, + { + name: "returns no error if infra config is marked for deletion", + cluster: cluster, + infraRef: map[string]interface{}{ + "kind": "InfrastructureConfig", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test-namespace", + "deletionTimestamp": "sometime", + }, }, + expectErr: false, + expectedMetric: 1, }, - wantErr: false, - }, - { - name: "kubeconfig secret not found, should return RequeueAfterError", - cluster: cluster, - wantErr: true, - wantRequeue: true, - }, - { - name: "invalid ca secret, should return error", - cluster: cluster, - secret: &corev1.Secret{ - ObjectMeta: v1.ObjectMeta{ - Name: "test-cluster-ca", + { + name: "returns no error and sets metric to 1 if infrastructure is marked ready on cluster", + cluster: cluster, + infraRef: map[string]interface{}{ + "kind": "InfrastructureConfig", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test-namespace", + "deletionTimestamp": "sometime", + }, }, + expectErr: false, + expectedMetric: 1, }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := clusterv1.AddToScheme(scheme.Scheme) - if err != nil { - t.Fatal(err) - } - - c := fake.NewFakeClient(tt.cluster) - if tt.secret != nil { - c = fake.NewFakeClient(tt.cluster, tt.secret) - } - r := &ClusterReconciler{ - Client: c, - } - err = r.reconcileKubeconfig(context.Background(), tt.cluster) - if (err != nil) != tt.wantErr { - t.Errorf("reconcileKubeconfig() error = %v, wantErr %v", err, tt.wantErr) - } - - _, hasRequeErr := errors.Cause(err).(capierrors.HasRequeueAfterError) - if tt.wantRequeue != 
hasRequeErr { - t.Errorf("expected RequeAfterError = %v, got %v", tt.wantRequeue, hasRequeErr) - } - }) - } + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + RegisterTestingT(t) + err := clusterv1.AddToScheme(scheme.Scheme) + if err != nil { + t.Fatal(err) + } + + var c client.Client + if tt.infraRef != nil { + infraConfig := &unstructured.Unstructured{Object: tt.infraRef} + c = fake.NewFakeClient(tt.cluster, infraConfig) + } else { + c = fake.NewFakeClient(tt.cluster) + } + r := &ClusterReconciler{ + Client: c, + Log: log.Log, + } + + err = r.reconcileInfrastructure(context.Background(), tt.cluster) + if tt.expectErr { + Expect(err).To(HaveOccurred()) + } else { + Expect(err).ToNot(HaveOccurred()) + } + + mr, err := metrics.Registry.Gather() + Expect(err).ToNot(HaveOccurred()) + mf := getMetricFamily(mr, "capi_cluster_infrastructure_ready") + Expect(mf).ToNot(BeNil()) + Expect(mf.GetMetric()[0].GetGauge().GetValue()).To(Equal(tt.expectedMetric)) + }) + } + + }) + + t.Run("reconcile kubeconfig", func(t *testing.T) { + cluster := &clusterv1.Cluster{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-cluster", + }, + Status: clusterv1.ClusterStatus{ + APIEndpoints: []clusterv1.APIEndpoint{{ + Host: "1.2.3.4", + Port: 0, + }}, + }, + } + + tests := []struct { + name string + cluster *clusterv1.Cluster + secret *corev1.Secret + wantErr bool + wantRequeue bool + expectedMetric float64 + }{ + { + name: "cluster not provisioned, apiEndpoint is not set", + cluster: &clusterv1.Cluster{}, + wantErr: false, + expectedMetric: 0, + }, + { + name: "kubeconfig secret found", + cluster: cluster, + secret: &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-cluster-kubeconfig", + }, + }, + wantErr: false, + expectedMetric: 1, + }, + { + name: "kubeconfig secret not found, should return RequeueAfterError", + cluster: cluster, + wantErr: true, + wantRequeue: true, + expectedMetric: 0, + }, + { + name: "invalid ca secret, should return error", + cluster: 
cluster, + secret: &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-cluster-ca", + }, + }, + wantErr: true, + expectedMetric: 0, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + RegisterTestingT(t) + err := clusterv1.AddToScheme(scheme.Scheme) + if err != nil { + t.Fatal(err) + } + + c := fake.NewFakeClient(tt.cluster) + if tt.secret != nil { + c = fake.NewFakeClient(tt.cluster, tt.secret) + } + r := &ClusterReconciler{ + Client: c, + } + err = r.reconcileKubeconfig(context.Background(), tt.cluster) + if (err != nil) != tt.wantErr { + t.Errorf("reconcileKubeconfig() error = %v, wantErr %v", err, tt.wantErr) + } + + _, hasRequeErr := errors.Cause(err).(capierrors.HasRequeueAfterError) + if tt.wantRequeue != hasRequeErr { + t.Errorf("expected RequeAfterError = %v, got %v", tt.wantRequeue, hasRequeErr) + } + + mr, err := metrics.Registry.Gather() + Expect(err).ToNot(HaveOccurred()) + mf := getMetricFamily(mr, "capi_cluster_kubeconfig_ready") + Expect(mf).ToNot(BeNil()) + for _, m := range mf.GetMetric() { + for _, lp := range m.GetLabel() { + if lp.GetName() == "cluster" && lp.GetValue() == "test-cluster" { + Expect(m.GetGauge().GetValue()).To(Equal(tt.expectedMetric)) + } + } + } + }) + } + }) } diff --git a/controllers/cluster_controller_test.go b/controllers/cluster_controller_test.go index 79fa3d157212..08fc6cbf07f9 100644 --- a/controllers/cluster_controller_test.go +++ b/controllers/cluster_controller_test.go @@ -34,6 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/metrics" ) var _ = Describe("Cluster Reconciler", func() { @@ -193,6 +194,7 @@ var _ = Describe("Cluster Reconciler", func() { instance.Spec.InfrastructureRef.Name == "test" }, timeout).Should(BeTrue()) }) + It("Should successfully patch a cluster object if only removing finalizers", func() { // Setup cluster := 
&clusterv1.Cluster{ @@ -320,6 +322,18 @@ var _ = Describe("Cluster Reconciler", func() { } return cluster.Status.ControlPlaneInitialized }, timeout).Should(BeTrue()) + + mr, err := metrics.Registry.Gather() + Expect(err).ToNot(HaveOccurred()) + mf := getMetricFamily(mr, "capi_cluster_control_plane_ready") + Expect(mf).ToNot(BeNil()) + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + if l.GetName() == "cluster" && l.GetValue() == cluster.Name { + Expect(m.GetGauge().GetValue()).To(Equal(float64(1))) + } + } + } }) }) diff --git a/controllers/machine_controller.go b/controllers/machine_controller.go index a7d99004aaa1..0f205755cf44 100644 --- a/controllers/machine_controller.go +++ b/controllers/machine_controller.go @@ -24,6 +24,7 @@ import ( "github.com/go-logr/logr" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -45,14 +46,44 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/metrics" ) var ( errNilNodeRef = errors.New("noderef is nil") errLastControlPlaneNode = errors.New("last control plane member") errNoControlPlaneNodes = errors.New("no control plane members") + machineBootstrapReady = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "capi_machine_bootstrap_ready", + Help: "Machine Bootstrap is ready if set to 1 and not if 0.", + }, + []string{"machine", "namespace", "cluster"}, + ) + machineInfrastructureReady = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "capi_machine_infrastructure_ready", + Help: "Machine InfrastructureRef is ready if set to 1 and not if 0.", + }, + []string{"machine", "namespace", "cluster"}, + ) + machineNodeReady = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "capi_machine_node_ready", + Help: "Machine NodeRef 
is ready if set to 1 and not if 0.", + }, + []string{"machine", "namespace", "cluster"}, + ) ) +func init() { + metrics.Registry.MustRegister( + machineBootstrapReady, + machineInfrastructureReady, + machineNodeReady, + ) +} + // +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch // +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch // +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;watch;create;update;patch;delete diff --git a/controllers/machine_controller_noderef.go b/controllers/machine_controller_noderef.go index 12052f250854..e88f052a7be4 100644 --- a/controllers/machine_controller_noderef.go +++ b/controllers/machine_controller_noderef.go @@ -34,7 +34,15 @@ var ( ErrNodeNotFound = errors.New("cannot find node with matching ProviderID") ) -func (r *MachineReconciler) reconcileNodeRef(_ context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { +func (r *MachineReconciler) reconcileNodeRef(_ context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (err error) { + defer func() { + if err != nil || machine.Status.NodeRef == nil { + machineNodeReady.WithLabelValues(machine.Name, machine.Namespace, machine.Spec.ClusterName).Set(0) + } else { + machineNodeReady.WithLabelValues(machine.Name, machine.Namespace, machine.Spec.ClusterName).Set(1) + } + }() + logger := r.Log.WithValues("machine", machine.Name, "namespace", machine.Namespace) // Check that the Machine hasn't been deleted or in the process. if !machine.DeletionTimestamp.IsZero() { diff --git a/controllers/machine_controller_phases.go b/controllers/machine_controller_phases.go index 4ed9dc47eedb..5f6dc5103942 100644 --- a/controllers/machine_controller_phases.go +++ b/controllers/machine_controller_phases.go @@ -151,7 +151,15 @@ func (r *MachineReconciler) reconcileExternal(ctx context.Context, m *clusterv1. } // reconcileBootstrap reconciles the Spec.Bootstrap.ConfigRef object on a Machine. 
-func (r *MachineReconciler) reconcileBootstrap(ctx context.Context, m *clusterv1.Machine) error { +func (r *MachineReconciler) reconcileBootstrap(ctx context.Context, m *clusterv1.Machine) (err error) { + defer func() { + if err != nil || !m.Status.BootstrapReady { + machineBootstrapReady.WithLabelValues(m.Name, m.Namespace, m.Spec.ClusterName).Set(0) + } else { + machineBootstrapReady.WithLabelValues(m.Name, m.Namespace, m.Spec.ClusterName).Set(1) + } + }() + // TODO(vincepri): Move this validation in kubebuilder / webhook. if m.Spec.Bootstrap.ConfigRef == nil && m.Spec.Bootstrap.Data == nil { return errors.Errorf( @@ -204,7 +212,15 @@ func (r *MachineReconciler) reconcileBootstrap(ctx context.Context, m *clusterv1 } // reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a Machine. -func (r *MachineReconciler) reconcileInfrastructure(ctx context.Context, m *clusterv1.Machine) error { +func (r *MachineReconciler) reconcileInfrastructure(ctx context.Context, m *clusterv1.Machine) (err error) { + defer func() { + if err != nil || !m.Status.InfrastructureReady { + machineInfrastructureReady.WithLabelValues(m.Name, m.Namespace, m.Spec.ClusterName).Set(0) + } else { + machineInfrastructureReady.WithLabelValues(m.Name, m.Namespace, m.Spec.ClusterName).Set(1) + } + }() + // Call generic external reconciler. infraConfig, err := r.reconcileExternal(ctx, m, &m.Spec.InfrastructureRef) if infraConfig == nil && err == nil { diff --git a/controllers/machine_controller_phases_test.go b/controllers/machine_controller_phases_test.go index a4735795a298..6b626ce222cd 100644 --- a/controllers/machine_controller_phases_test.go +++ b/controllers/machine_controller_phases_test.go @@ -22,17 +22,19 @@ import ( "time" . "github.com/onsi/ginkgo" - "github.com/onsi/gomega" . 
"github.com/onsi/gomega" + dto "github.com/prometheus/client_model/go" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/metrics" ) func init() { @@ -376,7 +378,8 @@ func TestReconcileBootstrap(t *testing.T) { bootstrapConfig map[string]interface{} machine *clusterv1.Machine expectError bool - expected func(g *gomega.WithT, m *clusterv1.Machine) + expected func(g *WithT, m *clusterv1.Machine) + expectedMetric float64 }{ { name: "new machine, bootstrap config ready with data", @@ -393,11 +396,12 @@ func TestReconcileBootstrap(t *testing.T) { "bootstrapData": "#!/bin/bash ... data", }, }, - expectError: false, - expected: func(g *gomega.WithT, m *clusterv1.Machine) { - g.Expect(m.Status.BootstrapReady).To(gomega.BeTrue()) - g.Expect(m.Spec.Bootstrap.Data).ToNot(gomega.BeNil()) - g.Expect(*m.Spec.Bootstrap.Data).To(gomega.ContainSubstring("#!/bin/bash")) + expectError: false, + expectedMetric: 1, + expected: func(g *WithT, m *clusterv1.Machine) { + g.Expect(m.Status.BootstrapReady).To(BeTrue()) + g.Expect(m.Spec.Bootstrap.Data).ToNot(BeNil()) + g.Expect(*m.Spec.Bootstrap.Data).To(ContainSubstring("#!/bin/bash")) }, }, { @@ -414,10 +418,11 @@ func TestReconcileBootstrap(t *testing.T) { "ready": true, }, }, - expectError: true, - expected: func(g *gomega.WithT, m *clusterv1.Machine) { - g.Expect(m.Status.BootstrapReady).To(gomega.BeFalse()) - g.Expect(m.Spec.Bootstrap.Data).To(gomega.BeNil()) + expectError: true, + expectedMetric: 0, + expected: func(g *WithT, m *clusterv1.Machine) { + g.Expect(m.Status.BootstrapReady).To(BeFalse()) + g.Expect(m.Spec.Bootstrap.Data).To(BeNil()) }, }, { @@ 
-432,9 +437,10 @@ func TestReconcileBootstrap(t *testing.T) { "spec": map[string]interface{}{}, "status": map[string]interface{}{}, }, - expectError: true, - expected: func(g *gomega.WithT, m *clusterv1.Machine) { - g.Expect(m.Status.BootstrapReady).To(gomega.BeFalse()) + expectError: true, + expectedMetric: 0, + expected: func(g *WithT, m *clusterv1.Machine) { + g.Expect(m.Status.BootstrapReady).To(BeFalse()) }, }, { @@ -449,9 +455,10 @@ func TestReconcileBootstrap(t *testing.T) { "spec": map[string]interface{}{}, "status": map[string]interface{}{}, }, - expectError: true, - expected: func(g *gomega.WithT, m *clusterv1.Machine) { - g.Expect(m.Status.BootstrapReady).To(gomega.BeFalse()) + expectError: true, + expectedMetric: 0, + expected: func(g *WithT, m *clusterv1.Machine) { + g.Expect(m.Status.BootstrapReady).To(BeFalse()) }, }, { @@ -466,7 +473,8 @@ func TestReconcileBootstrap(t *testing.T) { "spec": map[string]interface{}{}, "status": map[string]interface{}{}, }, - expectError: true, + expectedMetric: 0, + expectError: true, }, { name: "existing machine, bootstrap data should not change", @@ -502,10 +510,11 @@ func TestReconcileBootstrap(t *testing.T) { BootstrapReady: true, }, }, - expectError: false, - expected: func(g *gomega.WithT, m *clusterv1.Machine) { - g.Expect(m.Status.BootstrapReady).To(gomega.BeTrue()) - g.Expect(*m.Spec.Bootstrap.Data).To(gomega.Equal("#!/bin/bash ... data")) + expectError: false, + expectedMetric: 1, + expected: func(g *WithT, m *clusterv1.Machine) { + g.Expect(m.Status.BootstrapReady).To(BeTrue()) + g.Expect(*m.Spec.Bootstrap.Data).To(Equal("#!/bin/bash ... 
data")) }, }, { @@ -542,16 +551,19 @@ func TestReconcileBootstrap(t *testing.T) { BootstrapReady: true, }, }, - expectError: false, - expected: func(g *gomega.WithT, m *clusterv1.Machine) { - g.Expect(m.Status.BootstrapReady).To(gomega.BeTrue()) + expectError: false, + expectedMetric: 1, + expected: func(g *WithT, m *clusterv1.Machine) { + g.Expect(m.Status.BootstrapReady).To(BeTrue()) }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - g := gomega.NewGomegaWithT(t) + g := NewGomegaWithT(t) + err := clusterv1.AddToScheme(scheme.Scheme) + g.Expect(err).NotTo(HaveOccurred()) if tc.machine == nil { tc.machine = defaultMachine.DeepCopy() @@ -563,16 +575,22 @@ func TestReconcileBootstrap(t *testing.T) { Log: log.Log, } - err := r.reconcileBootstrap(context.Background(), tc.machine) + err = r.reconcileBootstrap(context.Background(), tc.machine) if tc.expectError { - g.Expect(err).ToNot(gomega.BeNil()) + g.Expect(err).ToNot(BeNil()) } else { - g.Expect(err).To(gomega.BeNil()) + g.Expect(err).To(BeNil()) } if tc.expected != nil { tc.expected(g, tc.machine) } + + mt, err := metrics.Registry.Gather() + g.Expect(err).ToNot(HaveOccurred()) + mf := getMetricFamily(mt, "capi_machine_bootstrap_ready") + g.Expect(mf).ToNot(BeNil()) + g.Expect(mf.GetMetric()[0].GetGauge().GetValue()).To(Equal(tc.expectedMetric)) }) } @@ -612,7 +630,8 @@ func TestReconcileInfrastructure(t *testing.T) { expectError bool expectChanged bool expectRequeueAfter bool - expected func(g *gomega.WithT, m *clusterv1.Machine) + expectedMetric float64 + expected func(g *WithT, m *clusterv1.Machine) }{ { name: "new machine, infrastructure config ready", @@ -640,10 +659,11 @@ func TestReconcileInfrastructure(t *testing.T) { }, }, }, - expectError: false, - expectChanged: true, - expected: func(g *gomega.WithT, m *clusterv1.Machine) { - g.Expect(m.Status.InfrastructureReady).To(gomega.BeTrue()) + expectError: false, + expectChanged: true, + expectedMetric: 1, + expected: func(g *WithT, m 
*clusterv1.Machine) { + g.Expect(m.Status.InfrastructureReady).To(BeTrue()) }, }, { @@ -693,18 +713,22 @@ func TestReconcileInfrastructure(t *testing.T) { }, expectError: true, expectRequeueAfter: true, - expected: func(g *gomega.WithT, m *clusterv1.Machine) { - g.Expect(m.Status.InfrastructureReady).To(gomega.BeTrue()) - g.Expect(m.Status.ErrorMessage).ToNot(gomega.BeNil()) - g.Expect(m.Status.ErrorReason).ToNot(gomega.BeNil()) - g.Expect(m.Status.GetTypedPhase()).To(gomega.Equal(clusterv1.MachinePhaseFailed)) + expectedMetric: 0, + expected: func(g *WithT, m *clusterv1.Machine) { + g.Expect(m.Status.InfrastructureReady).To(BeTrue()) + g.Expect(m.Status.ErrorMessage).ToNot(BeNil()) + g.Expect(m.Status.ErrorReason).ToNot(BeNil()) + g.Expect(m.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseFailed)) + }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - g := gomega.NewGomegaWithT(t) + g := NewGomegaWithT(t) + err := clusterv1.AddToScheme(scheme.Scheme) + g.Expect(err).NotTo(HaveOccurred()) if tc.machine == nil { tc.machine = defaultMachine.DeepCopy() @@ -716,19 +740,32 @@ func TestReconcileInfrastructure(t *testing.T) { Log: log.Log, } - err := r.reconcileInfrastructure(context.Background(), tc.machine) + err = r.reconcileInfrastructure(context.Background(), tc.machine) r.reconcilePhase(context.Background(), tc.machine) if tc.expectError { - g.Expect(err).ToNot(gomega.BeNil()) + g.Expect(err).ToNot(BeNil()) } else { - g.Expect(err).To(gomega.BeNil()) + g.Expect(err).To(BeNil()) } + mt, err := metrics.Registry.Gather() + g.Expect(err).ToNot(HaveOccurred()) + mf := getMetricFamily(mt, "capi_machine_infrastructure_ready") + g.Expect(mf).ToNot(BeNil()) + g.Expect(mf.GetMetric()[0].GetGauge().GetValue()).To(Equal(tc.expectedMetric)) + if tc.expected != nil { tc.expected(g, tc.machine) } }) - } +} +func getMetricFamily(list []*dto.MetricFamily, metricName string) *dto.MetricFamily { + for _, mf := range list { + if mf.GetName() == metricName 
{ + return mf + } + } + return nil } diff --git a/go.mod b/go.mod index 08a09247b3df..a0b6952051aa 100644 --- a/go.mod +++ b/go.mod @@ -15,6 +15,8 @@ require ( github.com/onsi/ginkgo v1.10.1 github.com/onsi/gomega v1.7.0 github.com/pkg/errors v0.8.1 + github.com/prometheus/client_golang v1.0.0 + github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 github.com/prometheus/procfs v0.0.5 // indirect github.com/sergi/go-diff v1.0.0 github.com/spf13/cobra v0.0.5