diff --git a/.chloggen/fix-hpa-delete.yaml b/.chloggen/fix-hpa-delete.yaml new file mode 100755 index 0000000000..e9cfd3c894 --- /dev/null +++ b/.chloggen/fix-hpa-delete.yaml @@ -0,0 +1,16 @@ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: 'bug_fix' + +# The name of the component, or a single word describing the area of concern, (e.g. operator, target allocator, github action) +component: operator + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Fixed HPA deletion so that removing the autoscaler configuration from the collector spec also removes the HPA + +# One or more tracking issues related to the change +issues: [2568, 2587, 2651] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: diff --git a/Makefile b/Makefile index eff886737a..e59b2e571a 100644 --- a/Makefile +++ b/Makefile @@ -60,7 +60,7 @@ endif START_KIND_CLUSTER ?= true -KUBE_VERSION ?= 1.24 +KUBE_VERSION ?= 1.29 KIND_CONFIG ?= kind-$(KUBE_VERSION).yaml KIND_CLUSTER_NAME ?= "otel-operator" @@ -367,7 +367,7 @@ KUSTOMIZE_VERSION ?= v5.0.3 CONTROLLER_TOOLS_VERSION ?= v0.12.0 GOLANGCI_LINT_VERSION ?= v1.54.0 KIND_VERSION ?= v0.20.0 -CHAINSAW_VERSION ?= v0.1.6 +CHAINSAW_VERSION ?= v0.1.7 .PHONY: install-tools install-tools: kustomize golangci-lint kind controller-gen envtest crdoc kind operator-sdk chainsaw diff --git a/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml b/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml index 0357e2f701..316060a979 100644 --- a/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml +++ b/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml @@ -65,7 +65,7 @@ metadata: categories: Logging & Tracing,Monitoring certified: "false" containerImage: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator - createdAt: "2024-02-15T18:18:46Z" + createdAt: "2024-02-27T11:16:56Z" description: Provides the OpenTelemetry components, including the Collector operators.operatorframework.io/builder: operator-sdk-v1.29.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 @@ -175,6 +175,8 @@ spec: - "" resources: - configmaps + - persistentvolumeclaims + - persistentvolumes - pods - serviceaccounts - services diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index c2b96f80a7..56318be008 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -8,6 +8,8 @@ rules: - "" resources: - configmaps + - persistentvolumeclaims + - persistentvolumes - pods - serviceaccounts - services diff --git a/controllers/builder_test.go b/controllers/builder_test.go index 8dd0bcfa21..4076ccd463 100644 --- a/controllers/builder_test.go +++ b/controllers/builder_test.go @@ -590,6 +590,9 @@ service: "app.kubernetes.io/instance": "test.test", "app.kubernetes.io/managed-by": "opentelemetry-operator", "app.kubernetes.io/name": "test-ingress", + "app.kubernetes.io/component": "opentelemetry-collector", + "app.kubernetes.io/part-of": "opentelemetry", + "app.kubernetes.io/version": "latest", }, Annotations: map[string]string{ "something": "true", diff --git a/controllers/common.go b/controllers/common.go index 2a7ff1d751..7885fa8a3a 100644 --- a/controllers/common.go +++ b/controllers/common.go @@ -23,6 +23,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -77,7 +78,7 @@ func BuildOpAMPBridge(params manifests.Params) ([]client.Object, error) { } // reconcileDesiredObjects runs the reconcile process using the mutateFn over the given list of objects. -func reconcileDesiredObjects(ctx context.Context, kubeClient client.Client, logger logr.Logger, owner metav1.Object, scheme *runtime.Scheme, desiredObjects ...client.Object) error { +func reconcileDesiredObjects(ctx context.Context, kubeClient client.Client, logger logr.Logger, owner metav1.Object, scheme *runtime.Scheme, desiredObjects []client.Object, ownedObjects map[types.UID]client.Object) error { var errs []error for _, desired := range desiredObjects { l := logger.WithValues( @@ -91,7 +92,6 @@ func reconcileDesiredObjects(ctx context.Context, kubeClient client.Client, logg continue } } - // existing is an object the controller runtime will hydrate for us // we obtain the existing object by deep copying the desired object because it's the most convenient way existing := desired.DeepCopyObject().(client.Object) @@ -116,9 +116,29 @@ func reconcileDesiredObjects(ctx context.Context, kubeClient client.Client, logg } l.V(1).Info(fmt.Sprintf("desired has been %s", op)) + // This object is still managed by the operator, remove it from the list of objects to prune + delete(ownedObjects, existing.GetUID()) } if len(errs) > 0 { return fmt.Errorf("failed to create objects for %s: %w", owner.GetName(), errors.Join(errs...)) } + // Pruning owned objects in the cluster which are not should not be present after the reconciliation. + pruneErrs := []error{} + for _, obj := range ownedObjects { + l := logger.WithValues( + "object_name", obj.GetName(), + "object_kind", obj.GetObjectKind().GroupVersionKind(), + ) + + l.Info("pruning unmanaged resource") + err := kubeClient.Delete(ctx, obj) + if err != nil { + l.Error(err, "failed to delete resource") + pruneErrs = append(pruneErrs, err) + } + } + if len(pruneErrs) > 0 { + return fmt.Errorf("failed to prune objects for %s: %w", owner.GetName(), errors.Join(pruneErrs...)) + } return nil } diff --git a/controllers/opampbridge_controller.go b/controllers/opampbridge_controller.go index 670418ad5c..50e6ffa130 100644 --- a/controllers/opampbridge_controller.go +++ b/controllers/opampbridge_controller.go @@ -103,7 +103,7 @@ func (r *OpAMPBridgeReconciler) Reconcile(ctx context.Context, req ctrl.Request) if buildErr != nil { return ctrl.Result{}, buildErr } - err := reconcileDesiredObjects(ctx, r.Client, log, ¶ms.OpAMPBridge, params.Scheme, desiredObjects...) 
+ err := reconcileDesiredObjects(ctx, r.Client, log, ¶ms.OpAMPBridge, params.Scheme, desiredObjects, nil) return opampbridgeStatus.HandleReconcileStatus(ctx, log, params, err) } diff --git a/controllers/opentelemetrycollector_controller.go b/controllers/opentelemetrycollector_controller.go index 5f4143d214..ce7c3f9185 100644 --- a/controllers/opentelemetrycollector_controller.go +++ b/controllers/opentelemetrycollector_controller.go @@ -17,24 +17,32 @@ package controllers import ( "context" + "fmt" "github.com/go-logr/logr" + routev1 "github.com/openshift/api/route/v1" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" autoscalingv2 "k8s.io/api/autoscaling/v2" corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" policyV1 "k8s.io/api/policy/v1" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1" "github.com/open-telemetry/opentelemetry-operator/internal/api/convert" + "github.com/open-telemetry/opentelemetry-operator/internal/autodetect/openshift" "github.com/open-telemetry/opentelemetry-operator/internal/config" "github.com/open-telemetry/opentelemetry-operator/internal/manifests" + "github.com/open-telemetry/opentelemetry-operator/internal/manifests/collector" + "github.com/open-telemetry/opentelemetry-operator/internal/manifests/manifestutils" collectorStatus "github.com/open-telemetry/opentelemetry-operator/internal/status/collector" "github.com/open-telemetry/opentelemetry-operator/pkg/featuregate" ) @@ -57,6 +65,71 @@ type Params struct { Config config.Config } +func (r *OpenTelemetryCollectorReconciler) findOtelOwnedObjects(ctx context.Context, params manifests.Params) (map[types.UID]client.Object, error) { + ownedObjects := map[types.UID]client.Object{} + + listOps := &client.ListOptions{ + Namespace: params.OtelCol.Namespace, + LabelSelector: labels.SelectorFromSet(manifestutils.SelectorLabels(params.OtelCol.ObjectMeta, collector.ComponentOpenTelemetryCollector)), + } + hpaList := &autoscalingv2.HorizontalPodAutoscalerList{} + err := r.List(ctx, hpaList, listOps) + if err != nil { + return nil, fmt.Errorf("error listing HorizontalPodAutoscalers: %w", err) + } + for i := range hpaList.Items { + ownedObjects[hpaList.Items[i].GetUID()] = &hpaList.Items[i] + } + if featuregate.PrometheusOperatorIsAvailable.IsEnabled() { + servicemonitorList := &monitoringv1.ServiceMonitorList{} + err = r.List(ctx, servicemonitorList, listOps) + if err != nil { + return nil, fmt.Errorf("error listing ServiceMonitors: %w", err) + } + for i := range servicemonitorList.Items { + ownedObjects[servicemonitorList.Items[i].GetUID()] = servicemonitorList.Items[i] + } + + podMonitorList := &monitoringv1.PodMonitorList{} + err = r.List(ctx, podMonitorList, listOps) + if err != nil { + return nil, fmt.Errorf("error listing PodMonitors: %w", err) + } + for i := range podMonitorList.Items { + ownedObjects[podMonitorList.Items[i].GetUID()] = podMonitorList.Items[i] + } + } + ingressList := &networkingv1.IngressList{} + err = r.List(ctx, ingressList, listOps) + if err != nil { + return nil, fmt.Errorf("error listing Ingresses: %w", err) + } + for i := range ingressList.Items { + ownedObjects[ingressList.Items[i].GetUID()] = 
&ingressList.Items[i] + } + + if params.Config.OpenShiftRoutesAvailability() == openshift.RoutesAvailable { + routesList := &routev1.RouteList{} + err = r.List(ctx, routesList, listOps) + if err != nil { + return nil, fmt.Errorf("error listing Routes: %w", err) + } + for i := range routesList.Items { + ownedObjects[routesList.Items[i].GetUID()] = &routesList.Items[i] + } + } + pdbList := &policyV1.PodDisruptionBudgetList{} + err = r.List(ctx, pdbList, listOps) + if err != nil { + return nil, fmt.Errorf("error listing PodDisruptionBudgets: %w", err) + } + for i := range pdbList.Items { + ownedObjects[pdbList.Items[i].GetUID()] = &pdbList.Items[i] + } + + return ownedObjects, nil +} + func (r *OpenTelemetryCollectorReconciler) getParams(instance v1alpha1.OpenTelemetryCollector) (manifests.Params, error) { otelCol, err := convert.V1Alpha1to2(instance) if err != nil { @@ -84,7 +157,7 @@ func NewReconciler(p Params) *OpenTelemetryCollectorReconciler { return r } -// +kubebuilder:rbac:groups="",resources=pods;configmaps;services;serviceaccounts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=pods;configmaps;services;serviceaccounts;persistentvolumeclaims;persistentvolumes,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=events,verbs=create;patch // +kubebuilder:rbac:groups=apps,resources=daemonsets;deployments;statefulsets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete @@ -134,9 +207,13 @@ func (r *OpenTelemetryCollectorReconciler) Reconcile(ctx context.Context, req ct if buildErr != nil { return ctrl.Result{}, buildErr } - // TODO: https://github.com/open-telemetry/opentelemetry-operator/issues/2620 - // TODO: Change &instance to use params.OtelCol - err = reconcileDesiredObjects(ctx, r.Client, log, &instance, params.Scheme, desiredObjects...) + + ownedObjects, err := r.findOtelOwnedObjects(ctx, params) + if err != nil { + return ctrl.Result{}, err + } + + err = reconcileDesiredObjects(ctx, r.Client, log, &instance, params.Scheme, desiredObjects, ownedObjects) return collectorStatus.HandleReconcileStatus(ctx, log, params, instance, err) } @@ -150,6 +227,9 @@ func (r *OpenTelemetryCollectorReconciler) SetupWithManager(mgr ctrl.Manager) er Owns(&appsv1.Deployment{}). Owns(&appsv1.DaemonSet{}). Owns(&appsv1.StatefulSet{}). + Owns(&corev1.PersistentVolume{}). + Owns(&corev1.PersistentVolumeClaim{}). + Owns(&networkingv1.Ingress{}). Owns(&autoscalingv2.HorizontalPodAutoscaler{}). 
Owns(&policyV1.PodDisruptionBudget{}) @@ -162,6 +242,9 @@ func (r *OpenTelemetryCollectorReconciler) SetupWithManager(mgr ctrl.Manager) er builder.Owns(&monitoringv1.ServiceMonitor{}) builder.Owns(&monitoringv1.PodMonitor{}) } + if r.config.OpenShiftRoutesAvailability() == openshift.RoutesAvailable { + builder.Owns(&routev1.Route{}) + } return builder.Complete(r) } diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 00176baf4c..f289cf1c75 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -26,8 +26,10 @@ import ( "time" routev1 "github.com/openshift/api/route/v1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -104,7 +106,7 @@ func TestMain(m *testing.M) { testEnv = &envtest.Environment{ CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, - CRDs: []*apiextensionsv1.CustomResourceDefinition{testdata.OpenShiftRouteCRD}, + CRDs: []*apiextensionsv1.CustomResourceDefinition{testdata.OpenShiftRouteCRD, testdata.ServiceMonitorCRD, testdata.PodMonitorCRD}, WebhookInstallOptions: envtest.WebhookInstallOptions{ Paths: []string{filepath.Join("..", "config", "webhook")}, }, @@ -114,12 +116,18 @@ func TestMain(m *testing.M) { fmt.Printf("failed to start testEnv: %v", err) os.Exit(1) } - + if err = monitoringv1.AddToScheme(testScheme); err != nil { + fmt.Printf("failed to register scheme: %v", err) + os.Exit(1) + } + if err = networkingv1.AddToScheme(testScheme); err != nil { + fmt.Printf("failed to register scheme: %v", err) + os.Exit(1) + } if err = routev1.AddToScheme(testScheme); err != nil { fmt.Printf("failed to register scheme: %v", err) os.Exit(1) } - if err = v1alpha1.AddToScheme(testScheme); err != nil { fmt.Printf("failed to register scheme: %v", err) os.Exit(1) diff --git a/internal/api/convert/v1alpha.go b/internal/api/convert/v1alpha.go index 53e37ebd7e..3079d627fe 100644 --- a/internal/api/convert/v1alpha.go +++ b/internal/api/convert/v1alpha.go @@ -51,6 +51,12 @@ func V1Alpha1to2(in v1alpha1.OpenTelemetryCollector) (v1beta1.OpenTelemetryColle Pods: m.Pods, } } + if copy.Spec.MaxReplicas != nil && copy.Spec.Autoscaler.MaxReplicas == nil { + copy.Spec.Autoscaler.MaxReplicas = copy.Spec.MaxReplicas + } + if copy.Spec.MinReplicas != nil && copy.Spec.Autoscaler.MinReplicas == nil { + copy.Spec.Autoscaler.MinReplicas = copy.Spec.MinReplicas + } out.Spec.OpenTelemetryCommonFields.Autoscaler = &v1beta1.AutoscalerSpec{ MinReplicas: copy.Spec.Autoscaler.MinReplicas, MaxReplicas: copy.Spec.Autoscaler.MaxReplicas, diff --git a/internal/manifests/collector/horizontalpodautoscaler.go b/internal/manifests/collector/horizontalpodautoscaler.go index 43c0b4354d..758217bd99 100644 --- a/internal/manifests/collector/horizontalpodautoscaler.go +++ b/internal/manifests/collector/horizontalpodautoscaler.go @@ -18,7 +18,6 @@ import ( autoscalingv2 "k8s.io/api/autoscaling/v2" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1" "github.com/open-telemetry/opentelemetry-operator/internal/manifests" @@ -26,7 +25,7 @@ import ( "github.com/open-telemetry/opentelemetry-operator/internal/naming" ) -func 
HorizontalPodAutoscaler(params manifests.Params) (client.Object, error) { +func HorizontalPodAutoscaler(params manifests.Params) (*autoscalingv2.HorizontalPodAutoscaler, error) { name := naming.Collector(params.OtelCol.Name) labels := manifestutils.Labels(params.OtelCol.ObjectMeta, name, params.OtelCol.Spec.Image, ComponentOpenTelemetryCollector, params.Config.LabelsFilter()) annotations, err := Annotations(params.OtelCol) @@ -34,7 +33,7 @@ func HorizontalPodAutoscaler(params manifests.Params) (client.Object, error) { return nil, err } - var result client.Object + var result *autoscalingv2.HorizontalPodAutoscaler objectMeta := metav1.ObjectMeta{ Name: naming.HorizontalPodAutoscaler(params.OtelCol.Name), diff --git a/internal/manifests/collector/horizontalpodautoscaler_test.go b/internal/manifests/collector/horizontalpodautoscaler_test.go index 7ade5d1874..29134197bb 100644 --- a/internal/manifests/collector/horizontalpodautoscaler_test.go +++ b/internal/manifests/collector/horizontalpodautoscaler_test.go @@ -19,7 +19,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - autoscalingv2 "k8s.io/api/autoscaling/v2" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -95,11 +94,9 @@ func TestHPA(t *testing.T) { }, Log: logger, } - raw, err := HorizontalPodAutoscaler(params) + hpa, err := HorizontalPodAutoscaler(params) require.NoError(t, err) - hpa := raw.(*autoscalingv2.HorizontalPodAutoscaler) - // verify assert.Equal(t, "my-instance-collector", hpa.Name) assert.Equal(t, "my-instance-collector", hpa.Labels["app.kubernetes.io/name"]) diff --git a/internal/manifests/collector/ingress.go b/internal/manifests/collector/ingress.go index fecd6d6a92..18c6d0cb6c 100644 --- a/internal/manifests/collector/ingress.go +++ b/internal/manifests/collector/ingress.go @@ -25,10 +25,13 @@ import ( "github.com/open-telemetry/opentelemetry-operator/apis/v1beta1" "github.com/open-telemetry/opentelemetry-operator/internal/manifests" "github.com/open-telemetry/opentelemetry-operator/internal/manifests/collector/adapters" + "github.com/open-telemetry/opentelemetry-operator/internal/manifests/manifestutils" "github.com/open-telemetry/opentelemetry-operator/internal/naming" ) func Ingress(params manifests.Params) (*networkingv1.Ingress, error) { + name := naming.Ingress(params.OtelCol.Name) + labels := manifestutils.Labels(params.OtelCol.ObjectMeta, name, params.OtelCol.Spec.Image, ComponentOpenTelemetryCollector, params.Config.LabelsFilter()) if params.OtelCol.Spec.Ingress.Type != v1beta1.IngressTypeNginx { return nil, nil } @@ -58,11 +61,7 @@ func Ingress(params manifests.Params) (*networkingv1.Ingress, error) { Name: naming.Ingress(params.OtelCol.Name), Namespace: params.OtelCol.Namespace, Annotations: params.OtelCol.Spec.Ingress.Annotations, - Labels: map[string]string{ - "app.kubernetes.io/name": naming.Ingress(params.OtelCol.Name), - "app.kubernetes.io/instance": fmt.Sprintf("%s.%s", params.OtelCol.Namespace, params.OtelCol.Name), - "app.kubernetes.io/managed-by": "opentelemetry-operator", - }, + Labels: labels, }, Spec: networkingv1.IngressSpec{ TLS: params.OtelCol.Spec.Ingress.TLS, diff --git a/internal/manifests/collector/ingress_test.go b/internal/manifests/collector/ingress_test.go index 9a7ae7ff10..374d97c939 100644 --- a/internal/manifests/collector/ingress_test.go +++ b/internal/manifests/collector/ingress_test.go @@ -51,6 +51,22 @@ func TestDesiredIngresses(t *testing.T) { assert.NoError(t, err) }) + t.Run("should return nil, no ingress set", 
func(t *testing.T) { + params := manifests.Params{ + Config: config.Config{}, + Log: logger, + OtelCol: v1beta1.OpenTelemetryCollector{ + Spec: v1beta1.OpenTelemetryCollectorSpec{ + Mode: "Deployment", + }, + }, + } + + actual, err := Ingress(params) + assert.Nil(t, actual) + assert.NoError(t, err) + }) + t.Run("should return nil unable to parse receiver ports", func(t *testing.T) { params := manifests.Params{ Config: config.Config{}, @@ -104,6 +120,9 @@ func TestDesiredIngresses(t *testing.T) { "app.kubernetes.io/name": naming.Ingress(params.OtelCol.Name), "app.kubernetes.io/instance": fmt.Sprintf("%s.%s", params.OtelCol.Namespace, params.OtelCol.Name), "app.kubernetes.io/managed-by": "opentelemetry-operator", + "app.kubernetes.io/component": "opentelemetry-collector", + "app.kubernetes.io/part-of": "opentelemetry", + "app.kubernetes.io/version": "latest", }, }, Spec: networkingv1.IngressSpec{ diff --git a/internal/manifests/collector/testdata/pm_crd.go b/internal/manifests/collector/testdata/pm_crd.go new file mode 100644 index 0000000000..6ab4d494da --- /dev/null +++ b/internal/manifests/collector/testdata/pm_crd.go @@ -0,0 +1,38 @@ +package testdata + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PodMonitorCRD as go structure. +var PodMonitorCRD = &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "podmonitors.monitoring.coreos.com", + }, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "monitoring.coreos.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Served: true, + Storage: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: func(v bool) *bool { return &v }(true), + }, + }, + Subresources: &apiextensionsv1.CustomResourceSubresources{ + Status: &apiextensionsv1.CustomResourceSubresourceStatus{}, + }, + }, + }, + Scope: apiextensionsv1.NamespaceScoped, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "podmonitors", + Singular: "podmonitor", + Kind: "PodMonitor", + }, + }, +} diff --git a/main.go b/main.go index d9f0ec5da2..1711f4810e 100644 --- a/main.go +++ b/main.go @@ -28,6 +28,7 @@ import ( monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "github.com/spf13/pflag" colfeaturegate "go.opentelemetry.io/collector/featuregate" + networkingv1 "k8s.io/api/networking/v1" k8sruntime "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes" @@ -75,6 +76,7 @@ func init() { utilruntime.Must(otelv1alpha1.AddToScheme(scheme)) utilruntime.Must(routev1.AddToScheme(scheme)) utilruntime.Must(monitoringv1.AddToScheme(scheme)) + utilruntime.Must(networkingv1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } diff --git a/tests/e2e-autoscale/autoscale/04-error.yaml b/tests/e2e-autoscale/autoscale/04-error.yaml new file mode 100644 index 0000000000..04900e76a9 --- /dev/null +++ b/tests/e2e-autoscale/autoscale/04-error.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: simplest-collector +spec: + maxReplicas: 2 + scaleTargetRef: + apiVersion: opentelemetry.io/v1alpha1 + kind: OpenTelemetryCollector + name: simplest +status: + currentMetrics: null + currentReplicas: 1 +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: 
simplest-set-utilization-collector +spec: + maxReplicas: 2 + scaleTargetRef: + apiVersion: opentelemetry.io/v1alpha1 + kind: OpenTelemetryCollector + name: simplest-set-utilization +status: + currentMetrics: null + currentReplicas: 1 diff --git a/tests/e2e-autoscale/autoscale/04-install.yaml b/tests/e2e-autoscale/autoscale/04-install.yaml new file mode 100644 index 0000000000..8f32f16d37 --- /dev/null +++ b/tests/e2e-autoscale/autoscale/04-install.yaml @@ -0,0 +1,74 @@ +# This creates two different deployments: +# * The first one is to ensure an autoscaler is created without +# setting any metrics in the Spec. +# * The second is to check that scaling works properly and that +# spec updates are working as expected. +# +apiVersion: opentelemetry.io/v1alpha1 +kind: OpenTelemetryCollector +metadata: + name: simplest +spec: +# TODO: these tests use .Spec.MaxReplicas and .Spec.MinReplicas. These fields are +# deprecated and moved to .Spec.Autoscaler. It is fine to use them to test that the old CRD is +# still supported, but they should eventually be updated. + minReplicas: 1 + maxReplicas: 2 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + + config: | + receivers: + otlp: + protocols: + grpc: + http: + processors: + + exporters: + debug: + + service: + pipelines: + traces: + receivers: [otlp] + processors: [] + exporters: [debug] +--- +apiVersion: opentelemetry.io/v1alpha1 +kind: OpenTelemetryCollector +metadata: + name: simplest-set-utilization +spec: + minReplicas: 1 + maxReplicas: 2 + resources: + limits: + cpu: 250m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + + config: | + receivers: + otlp: + protocols: + grpc: + http: + processors: + + exporters: + debug: + + service: + pipelines: + traces: + receivers: [otlp] + processors: [] + exporters: [debug]
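These tests exercise the deprecated top-level replica fields on purpose; the change to internal/api/convert/v1alpha.go earlier in this diff is what makes them behave like spec.autoscaler. A minimal, self-contained sketch of that fallback (types abbreviated here, not the real API structs):

package main

import "fmt"

type autoscalerSpec struct{ MinReplicas, MaxReplicas *int32 }

type collectorSpec struct {
	MinReplicas, MaxReplicas *int32 // deprecated top-level fields
	Autoscaler               autoscalerSpec
}

// Deprecated values are copied over only when the new fields are unset,
// so an explicit spec.autoscaler setting always wins.
func fillAutoscalerFromDeprecated(s *collectorSpec) {
	if s.MaxReplicas != nil && s.Autoscaler.MaxReplicas == nil {
		s.Autoscaler.MaxReplicas = s.MaxReplicas
	}
	if s.MinReplicas != nil && s.Autoscaler.MinReplicas == nil {
		s.Autoscaler.MinReplicas = s.MinReplicas
	}
}

func main() {
	lo, hi := int32(1), int32(2)
	s := collectorSpec{MinReplicas: &lo, MaxReplicas: &hi}
	fillAutoscalerFromDeprecated(&s)
	fmt.Println(*s.Autoscaler.MinReplicas, *s.Autoscaler.MaxReplicas) // 1 2
}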
diff --git a/tests/e2e-autoscale/autoscale/chainsaw-test.yaml b/tests/e2e-autoscale/autoscale/chainsaw-test.yaml index d61acf4643..6f19ced46a 100755 --- a/tests/e2e-autoscale/autoscale/chainsaw-test.yaml +++ b/tests/e2e-autoscale/autoscale/chainsaw-test.yaml @@ -33,3 +33,10 @@ spec: name: telemetrygen-set-utilization - assert: file: 03-assert.yaml + # Applying the same OpenTelemetryCollector from step 00, but this time without spec.autoscaler, which should inform the controller to delete the HPA resources. So we should get an error when checking whether the HPAs still exist. + - name: step-04 + try: + - apply: + file: 04-install.yaml + - error: + file: 04-error.yaml diff --git a/tests/e2e/ingress/00-assert.yaml b/tests/e2e/ingress/00-assert.yaml index 5604513d10..79f736ee79 100644 --- a/tests/e2e/ingress/00-assert.yaml +++ b/tests/e2e/ingress/00-assert.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: simplest-collector + name: otel-simplest-collector status: readyReplicas: 1 --- @@ -13,14 +13,14 @@ metadata: something.com: "true" labels: app.kubernetes.io/managed-by: opentelemetry-operator - app.kubernetes.io/name: simplest-ingress - name: simplest-ingress + app.kubernetes.io/name: otel-simplest-ingress + name: otel-simplest-ingress ownerReferences: - apiVersion: opentelemetry.io/v1alpha1 blockOwnerDeletion: true controller: true kind: OpenTelemetryCollector - name: simplest + name: otel-simplest spec: rules: - host: example.com http: paths: - backend: service: - name: simplest-collector + name: otel-simplest-collector port: name: otlp-grpc path: /otlp-grpc pathType: Prefix - backend: service: - name: simplest-collector + name: otel-simplest-collector port: name: otlp-http path: /otlp-http diff --git a/tests/e2e/ingress/00-install.yaml b/tests/e2e/ingress/00-install.yaml index a94b434a6c..ce10c55a73 100644 --- a/tests/e2e/ingress/00-install.yaml +++ b/tests/e2e/ingress/00-install.yaml @@ -2,9 +2,9 @@ apiVersion: opentelemetry.io/v1alpha1 kind: OpenTelemetryCollector metadata: - name: simplest + name: otel-simplest spec: - mode: "deployment" + mode: deployment ingress: type: ingress hostname: "example.com" diff --git a/tests/e2e/ingress/01-error.yaml b/tests/e2e/ingress/01-error.yaml new file mode 100644 index 0000000000..e7db9b4f03 --- /dev/null +++ b/tests/e2e/ingress/01-error.yaml @@ -0,0 +1,35 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + something.com: "true" + labels: + app.kubernetes.io/managed-by: opentelemetry-operator + app.kubernetes.io/name: otel-simplest-ingress + name: otel-simplest-ingress + ownerReferences: + - apiVersion: opentelemetry.io/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: OpenTelemetryCollector + name: otel-simplest +spec: + rules: + - host: example.com + http: + paths: + - backend: + service: + name: otel-simplest-collector + port: + name: otlp-grpc + path: /otlp-grpc + pathType: Prefix + - backend: + service: + name: otel-simplest-collector + port: + name: otlp-http + path: /otlp-http + pathType: Prefix diff --git a/tests/e2e/ingress/01-remove-ingress.yaml b/tests/e2e/ingress/01-remove-ingress.yaml new file mode 100644 index 0000000000..4a6f3f637e --- /dev/null +++ b/tests/e2e/ingress/01-remove-ingress.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: opentelemetry.io/v1alpha1 +kind: OpenTelemetryCollector +metadata: + name: otel-simplest +spec: + mode: deployment + ingress: ~ + config: | + receivers: + otlp: + protocols: + grpc: + http: + + exporters: + debug: + + service: + pipelines: + traces: + receivers: [otlp] + processors: [] + exporters: [debug]
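As the test comments describe, a chainsaw error step is expected to find the listed state absent: the step fails if the Ingress can still be looked up. A rough Go equivalent of what step-01 verifies — a hypothetical helper for illustration, not part of this PR:

package main

import (
	"context"

	networkingv1 "k8s.io/api/networking/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// ingressPruned reports whether the collector's Ingress is gone,
// i.e. the lookup fails with NotFound after spec.ingress is cleared.
func ingressPruned(ctx context.Context, c client.Client, ns string) (bool, error) {
	var ing networkingv1.Ingress
	err := c.Get(ctx, types.NamespacedName{Namespace: ns, Name: "otel-simplest-ingress"}, &ing)
	if apierrors.IsNotFound(err) {
		return true, nil
	}
	return false, err
}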
diff --git a/tests/e2e/ingress/chainsaw-test.yaml b/tests/e2e/ingress/chainsaw-test.yaml index 72a7fa61e6..e81b82d921 100755 --- a/tests/e2e/ingress/chainsaw-test.yaml +++ b/tests/e2e/ingress/chainsaw-test.yaml @@ -2,13 +2,26 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: - creationTimestamp: null name: ingress spec: + skip: false + concurrent: false + skipDelete: false + timeouts: + apply: 10s + assert: 10s + error: 10s steps: - - name: step-00 - try: - - apply: - file: 00-install.yaml - - assert: - file: 00-assert.yaml + - name: step-00 + try: + - apply: + file: 00-install.yaml + - assert: + file: 00-assert.yaml + # Applying the same OpenTelemetryCollector from the previous step, but this time without the spec.ingress, which should inform the controller to delete the ingress resource. So we should get an error when checking whether the ingress still exists. + - name: step-01 + try: + - patch: + file: 01-remove-ingress.yaml + - error: + file: 01-error.yaml diff --git a/tests/e2e/statefulset-features/01-update-volume-claim-templates.yaml b/tests/e2e/statefulset-features/01-update-volume-claim-templates.yaml index bed1ca77c3..633d10b911 100644 --- a/tests/e2e/statefulset-features/01-update-volume-claim-templates.yaml +++ b/tests/e2e/statefulset-features/01-update-volume-claim-templates.yaml @@ -31,4 +31,4 @@ spec: traces: receivers: [jaeger] processors: [] - exporters: [debug] + exporters: [debug] \ No newline at end of file
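For reference, the prune pass only ever sees what findOtelOwnedObjects gathers: each prunable kind is listed with the collector's selector labels and keyed by UID. A trimmed sketch of that pattern for a single kind, assuming simplified names rather than the controller code above:

package main

import (
	"context"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listOwnedHPAs gathers the HPAs carrying the collector's selector labels.
func listOwnedHPAs(ctx context.Context, c client.Client, ns string, sel map[string]string) (map[types.UID]client.Object, error) {
	owned := map[types.UID]client.Object{}
	opts := &client.ListOptions{
		Namespace:     ns,
		LabelSelector: labels.SelectorFromSet(sel),
	}
	hpas := &autoscalingv2.HorizontalPodAutoscalerList{}
	if err := c.List(ctx, hpas, opts); err != nil {
		return nil, err
	}
	for i := range hpas.Items {
		// Index into the slice so the map stores a stable pointer,
		// not the address of a reused loop variable.
		owned[hpas.Items[i].GetUID()] = &hpas.Items[i]
	}
	return owned, nil
}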