From 211861ebb961e288ba685a5e235a29260999192e Mon Sep 17 00:00:00 2001 From: acmenezes Date: Mon, 31 Jul 2023 16:07:35 -0400 Subject: [PATCH 01/17] ADD v1beta2 with new Loki integration fields Signed-off-by: acmenezes --- Makefile | 7 + api/v1alpha1/doc.go | 2 +- api/v1alpha1/flowcollector_types.go | 1 + api/v1alpha1/flowcollector_webhook.go | 71 +- api/v1alpha1/zz_generated.conversion.go | 656 ++- api/v1beta1/doc.go | 1 + api/v1beta1/flowcollector_types.go | 4 +- api/v1beta1/flowcollector_webhook.go | 105 +- api/v1beta1/groupversion_info.go | 3 +- api/v1beta1/zz_generated.conversion.go | 1116 +++++ api/v1beta1/zz_generated.deepcopy.go | 2 +- api/v1beta2/doc.go | 15 + api/v1beta2/flowcollector_types.go | 867 ++++ api/v1beta2/flowcollector_webhook.go | 32 + api/v1beta2/groupversion_info.go | 36 + api/v1beta2/zz_generated.deepcopy.go | 698 +++ .../flows.netobserv.io_flowcollectors.yaml | 2350 +++++++++ ...observ-operator.clusterserviceversion.yaml | 3 + .../flows.netobserv.io_flowcollectors.yaml | 2485 ++++++++- .../samples/flows_v1beta2_flowcollector.yaml | 147 + ...flows_v1beta2_flowcollector_lokistack.yaml | 168 + .../consoleplugin/consoleplugin_objects.go | 10 +- .../consoleplugin/consoleplugin_reconciler.go | 17 +- .../consoleplugin/consoleplugin_test.go | 24 +- controllers/ebpf/agent_controller.go | 2 +- .../ebpf/internal/permissions/permissions.go | 2 +- controllers/flowcollector_controller.go | 2 +- ...wcollector_controller_certificates_test.go | 25 +- .../flowcollector_controller_console_test.go | 6 +- .../flowcollector_controller_ebpf_test.go | 2 +- .../flowcollector_controller_iso_test.go | 31 +- controllers/flowcollector_controller_test.go | 6 +- .../flowlogspipeline/flp_common_objects.go | 31 +- .../flowlogspipeline/flp_ingest_objects.go | 2 +- .../flowlogspipeline/flp_ingest_reconciler.go | 2 +- .../flowlogspipeline/flp_monolith_objects.go | 2 +- .../flp_monolith_reconciler.go | 4 +- .../flowlogspipeline/flp_reconciler.go | 2 +- controllers/flowlogspipeline/flp_test.go | 14 +- .../flowlogspipeline/flp_transfo_objects.go | 2 +- .../flp_transfo_reconciler.go | 4 +- controllers/ovs/flowsconfig_cno_reconciler.go | 2 +- .../ovs/flowsconfig_ovnk_reconciler.go | 2 +- controllers/ovs/flowsconfig_types.go | 2 +- controllers/suite_test.go | 4 + docs/FlowCollector.md | 4436 ++++++++++++++++- main.go | 4 +- pkg/helper/comparators.go | 2 +- pkg/helper/flowcollector.go | 101 +- pkg/volumes/builder.go | 2 +- pkg/watchers/object_ref.go | 2 +- pkg/watchers/watcher.go | 2 +- pkg/watchers/watcher_test.go | 2 +- 53 files changed, 13017 insertions(+), 503 deletions(-) create mode 100644 api/v1beta1/zz_generated.conversion.go create mode 100644 api/v1beta2/doc.go create mode 100644 api/v1beta2/flowcollector_types.go create mode 100644 api/v1beta2/flowcollector_webhook.go create mode 100644 api/v1beta2/groupversion_info.go create mode 100644 api/v1beta2/zz_generated.deepcopy.go create mode 100644 config/samples/flows_v1beta2_flowcollector.yaml create mode 100644 config/samples/flows_v1beta2_flowcollector_lokistack.yaml diff --git a/Makefile b/Makefile index 357eaed05..b6cf3b6f7 100644 --- a/Makefile +++ b/Makefile @@ -259,6 +259,13 @@ generate-go-conversions: $(CONVERSION_GEN) ## Run all generate-go-conversions --output-file-base=zz_generated.conversion \ $(CONVERSION_GEN_OUTPUT_BASE) \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt + $(MAKE) clean-generated-conversions SRC_DIRS="./api/v1beta1" + $(CONVERSION_GEN) \ + --input-dirs=./api/v1beta1 \ + 
--build-tag=ignore_autogenerated_core \ + --output-file-base=zz_generated.conversion \ + $(CONVERSION_GEN_OUTPUT_BASE) \ + --go-header-file=./hack/boilerplate/boilerplate.generatego.txt generate: gencode manifests doc generate-go-conversions ## Run all code/file generators diff --git a/api/v1alpha1/doc.go b/api/v1alpha1/doc.go index c49908aa2..c0105103f 100644 --- a/api/v1alpha1/doc.go +++ b/api/v1alpha1/doc.go @@ -12,5 +12,5 @@ limitations under the License. */ // Package v1aplha1 contains the v1alpha1 API implementation. -// +k8s:conversion-gen=github.com/netobserv/network-observability-operator/api/v1beta1 +// +k8s:conversion-gen=github.com/netobserv/network-observability-operator/api/v1beta2 package v1alpha1 diff --git a/api/v1alpha1/flowcollector_types.go b/api/v1alpha1/flowcollector_types.go index 36613224c..dc9ae8aaa 100644 --- a/api/v1alpha1/flowcollector_types.go +++ b/api/v1alpha1/flowcollector_types.go @@ -55,6 +55,7 @@ type FlowCollectorSpec struct { Processor FlowCollectorFLP `json:"processor,omitempty"` // loki, the flow store, client settings. + // +k8s:conversion-gen=false Loki FlowCollectorLoki `json:"loki,omitempty"` // consolePlugin defines the settings related to the OpenShift Console plugin, when available. diff --git a/api/v1alpha1/flowcollector_webhook.go b/api/v1alpha1/flowcollector_webhook.go index 66e17da35..80e50e20d 100644 --- a/api/v1alpha1/flowcollector_webhook.go +++ b/api/v1alpha1/flowcollector_webhook.go @@ -20,26 +20,26 @@ import ( "fmt" "reflect" - "github.com/netobserv/network-observability-operator/api/v1beta1" + "github.com/netobserv/network-observability-operator/api/v1beta2" utilconversion "github.com/netobserv/network-observability-operator/pkg/conversion" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" ) -// ConvertTo converts this v1alpha1 FlowCollector to its v1beta1 equivalent (the conversion Hub) +// ConvertTo converts this v1alpha1 FlowCollector to its v1beta2 equivalent (the conversion Hub) // https://book.kubebuilder.io/multiversion-tutorial/conversion.html func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.FlowCollector) + dst := dstRaw.(*v1beta2.FlowCollector) - if err := Convert_v1alpha1_FlowCollector_To_v1beta1_FlowCollector(r, dst, nil); err != nil { - return fmt.Errorf("copying v1alpha1.FlowCollector into v1beta1.FlowCollector: %w", err) + if err := Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(r, dst, nil); err != nil { + return fmt.Errorf("copying v1alpha1.FlowCollector into v1beta2.FlowCollector: %w", err) } dst.Status.Conditions = make([]v1.Condition, len(r.Status.Conditions)) copy(dst.Status.Conditions, r.Status.Conditions) // Manually restore data. 
- restored := &v1beta1.FlowCollector{} + restored := &v1beta2.FlowCollector{} if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok { return err } @@ -69,10 +69,8 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { copy(dst.Spec.Agent.EBPF.Features, restored.Spec.Agent.EBPF.Features) } - dst.Spec.Loki.StatusTLS = restored.Spec.Loki.StatusTLS dst.Spec.Kafka.SASL = restored.Spec.Kafka.SASL - - dst.Spec.ConsolePlugin.Enable = restored.Spec.ConsolePlugin.Enable + dst.Spec.Loki.Manual = restored.Spec.Loki.Manual dst.Spec.Processor.Metrics.Server.TLS.InsecureSkipVerify = restored.Spec.Processor.Metrics.Server.TLS.InsecureSkipVerify dst.Spec.Processor.Metrics.Server.TLS.ProvidedCaFile = restored.Spec.Processor.Metrics.Server.TLS.ProvidedCaFile @@ -88,7 +86,7 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { return nil } -func isExporterIn(restoredExporter *v1beta1.FlowCollectorExporter, dstExporters []*v1beta1.FlowCollectorExporter) bool { +func isExporterIn(restoredExporter *v1beta2.FlowCollectorExporter, dstExporters []*v1beta2.FlowCollectorExporter) bool { for _, dstExp := range dstExporters { if reflect.DeepEqual(restoredExporter, dstExp) { @@ -98,12 +96,12 @@ func isExporterIn(restoredExporter *v1beta1.FlowCollectorExporter, dstExporters return false } -// ConvertFrom converts the hub version v1beta1 FlowCollector object to v1alpha1 +// ConvertFrom converts the hub version v1beta2 FlowCollector object to v1alpha1 func (r *FlowCollector) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.FlowCollector) + src := srcRaw.(*v1beta2.FlowCollector) - if err := Convert_v1beta1_FlowCollector_To_v1alpha1_FlowCollector(src, r, nil); err != nil { - return fmt.Errorf("copying v1beta1.FlowCollector into v1alpha1.FlowCollector: %w", err) + if err := Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(src, r, nil); err != nil { + return fmt.Errorf("copying v1beta2.FlowCollector into v1alpha1.FlowCollector: %w", err) } r.Status.Conditions = make([]v1.Condition, len(src.Status.Conditions)) copy(r.Status.Conditions, src.Status.Conditions) @@ -113,55 +111,62 @@ func (r *FlowCollector) ConvertFrom(srcRaw conversion.Hub) error { } func (r *FlowCollectorList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.FlowCollectorList) - return Convert_v1alpha1_FlowCollectorList_To_v1beta1_FlowCollectorList(r, dst, nil) + dst := dstRaw.(*v1beta2.FlowCollectorList) + return Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(r, dst, nil) } func (r *FlowCollectorList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.FlowCollectorList) - return Convert_v1beta1_FlowCollectorList_To_v1alpha1_FlowCollectorList(src, r, nil) + src := srcRaw.(*v1beta2.FlowCollectorList) + return Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(src, r, nil) } // This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta1 not in v1alpha1 +// we have new defined fields in v1beta2 not in v1alpha1 // nolint:golint,stylecheck,revive -func Convert_v1beta1_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1beta1.FlowCollectorFLP, out *FlowCollectorFLP, s apiconversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in, out, s) +func Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s apiconversion.Scope) error { + return 
autoConvert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in, out, s) } // This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta1 not in v1alpha1 +// we have new defined fields in v1beta2 not in v1alpha1 // nolint:golint,stylecheck,revive -func Convert_v1beta1_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta1.FLPMetrics, out *FLPMetrics, s apiconversion.Scope) error { - return autoConvert_v1beta1_FLPMetrics_To_v1alpha1_FLPMetrics(in, out, s) +func Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s apiconversion.Scope) error { + return autoConvert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(in, out, s) } // This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta1 not in v1alpha1 +// we have new defined fields in v1beta2 not in v1alpha1 +// nolint:golint,stylecheck,revive +func Convert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1beta2.FlowCollectorLoki, out *FlowCollectorLoki, s apiconversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in, out, s) +} + +// This function need to be manually created because conversion-gen not able to create it intentionally because +// we have new defined fields in v1beta2 not in v1alpha1 // nolint:golint,stylecheck,revive -func Convert_v1beta1_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1beta1.FlowCollectorLoki, out *FlowCollectorLoki, s apiconversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in, out, s) +func Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta2.FlowCollectorLoki, s apiconversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in, out, s) } // This function need to be manually created because conversion-gen not able to create it intentionally because // we have new defined fields in v1beta1 not in v1alpha1 // nolint:golint,stylecheck,revive -func Convert_v1beta1_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in *v1beta1.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s apiconversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in, out, s) +func Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s apiconversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in, out, s) } // This function need to be manually created because conversion-gen not able to create it intentionally because // we have new defined fields in v1beta1 not in v1alpha1 // nolint:golint,stylecheck,revive -func Convert_v1beta1_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta1.FlowCollectorExporter, out *FlowCollectorExporter, s apiconversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in, out, s) +func Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s apiconversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in, out, s) } // This function need to be manually 
created because conversion-gen not able to create it intentionally because // we have new defined fields in v1beta1 not in v1alpha1 // nolint:golint,stylecheck,revive -func Convert_v1beta1_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta1.FlowCollectorEBPF, out *FlowCollectorEBPF, s apiconversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in, out, s) +func Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s apiconversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in, out, s) } // // This function need to be manually created because conversion-gen not able to create it intentionally because diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go index 485017f85..93308e832 100644 --- a/api/v1alpha1/zz_generated.conversion.go +++ b/api/v1alpha1/zz_generated.conversion.go @@ -24,7 +24,7 @@ package v1alpha1 import ( unsafe "unsafe" - v1beta1 "github.com/netobserv/network-observability-operator/api/v1beta1" + v1beta2 "github.com/netobserv/network-observability-operator/api/v1beta2" v2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" @@ -38,228 +38,233 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*CertificateReference)(nil), (*v1beta1.CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(a.(*CertificateReference), b.(*v1beta1.CertificateReference), scope) + if err := s.AddGeneratedConversionFunc((*CertificateReference)(nil), (*v1beta2.CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(a.(*CertificateReference), b.(*v1beta2.CertificateReference), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.CertificateReference)(nil), (*CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference(a.(*v1beta1.CertificateReference), b.(*CertificateReference), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.CertificateReference)(nil), (*CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(a.(*v1beta2.CertificateReference), b.(*CertificateReference), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ClientTLS)(nil), (*v1beta1.ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClientTLS_To_v1beta1_ClientTLS(a.(*ClientTLS), b.(*v1beta1.ClientTLS), scope) + if err := s.AddGeneratedConversionFunc((*ClientTLS)(nil), (*v1beta2.ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(a.(*ClientTLS), b.(*v1beta2.ClientTLS), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ClientTLS)(nil), (*ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta1_ClientTLS_To_v1alpha1_ClientTLS(a.(*v1beta1.ClientTLS), b.(*ClientTLS), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ClientTLS)(nil), (*ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(a.(*v1beta2.ClientTLS), b.(*ClientTLS), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ClusterNetworkOperatorConfig)(nil), (*v1beta1.ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(a.(*ClusterNetworkOperatorConfig), b.(*v1beta1.ClusterNetworkOperatorConfig), scope) + if err := s.AddGeneratedConversionFunc((*ClusterNetworkOperatorConfig)(nil), (*v1beta2.ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(a.(*ClusterNetworkOperatorConfig), b.(*v1beta2.ClusterNetworkOperatorConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ClusterNetworkOperatorConfig)(nil), (*ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(a.(*v1beta1.ClusterNetworkOperatorConfig), b.(*ClusterNetworkOperatorConfig), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ClusterNetworkOperatorConfig)(nil), (*ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(a.(*v1beta2.ClusterNetworkOperatorConfig), b.(*ClusterNetworkOperatorConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ConsolePluginPortConfig)(nil), (*v1beta1.ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(a.(*ConsolePluginPortConfig), b.(*v1beta1.ConsolePluginPortConfig), scope) + if err := s.AddGeneratedConversionFunc((*ConsolePluginPortConfig)(nil), (*v1beta2.ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(a.(*ConsolePluginPortConfig), b.(*v1beta2.ConsolePluginPortConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ConsolePluginPortConfig)(nil), (*ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(a.(*v1beta1.ConsolePluginPortConfig), b.(*ConsolePluginPortConfig), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ConsolePluginPortConfig)(nil), (*ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(a.(*v1beta2.ConsolePluginPortConfig), b.(*ConsolePluginPortConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*DebugConfig)(nil), (*v1beta1.DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_DebugConfig_To_v1beta1_DebugConfig(a.(*DebugConfig), b.(*v1beta1.DebugConfig), scope) + if err := 
s.AddGeneratedConversionFunc((*DebugConfig)(nil), (*v1beta2.DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(a.(*DebugConfig), b.(*v1beta2.DebugConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DebugConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DebugConfig_To_v1alpha1_DebugConfig(a.(*v1beta1.DebugConfig), b.(*DebugConfig), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DebugConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(a.(*v1beta2.DebugConfig), b.(*DebugConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FLPMetrics)(nil), (*v1beta1.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FLPMetrics_To_v1beta1_FLPMetrics(a.(*FLPMetrics), b.(*v1beta1.FLPMetrics), scope) + if err := s.AddGeneratedConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollector)(nil), (*v1beta1.FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollector_To_v1beta1_FlowCollector(a.(*FlowCollector), b.(*v1beta1.FlowCollector), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollector)(nil), (*v1beta2.FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(a.(*FlowCollector), b.(*v1beta2.FlowCollector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollector)(nil), (*FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollector_To_v1alpha1_FlowCollector(a.(*v1beta1.FlowCollector), b.(*FlowCollector), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollector)(nil), (*FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(a.(*v1beta2.FlowCollector), b.(*FlowCollector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorAgent)(nil), (*v1beta1.FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(a.(*FlowCollectorAgent), b.(*v1beta1.FlowCollectorAgent), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorAgent)(nil), (*v1beta2.FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(a.(*FlowCollectorAgent), b.(*v1beta2.FlowCollectorAgent), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorAgent)(nil), (*FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(a.(*v1beta1.FlowCollectorAgent), b.(*FlowCollectorAgent), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorAgent)(nil), (*FlowCollectorAgent)(nil), func(a, b interface{}, 
scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(a.(*v1beta2.FlowCollectorAgent), b.(*FlowCollectorAgent), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorConsolePlugin)(nil), (*v1beta1.FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(a.(*FlowCollectorConsolePlugin), b.(*v1beta1.FlowCollectorConsolePlugin), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorConsolePlugin)(nil), (*v1beta2.FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(a.(*FlowCollectorConsolePlugin), b.(*v1beta2.FlowCollectorConsolePlugin), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta1.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta1.FlowCollectorEBPF), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta2.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta2.FlowCollectorEBPF), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorExporter)(nil), (*v1beta1.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta1.FlowCollectorExporter), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorExporter)(nil), (*v1beta2.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta2.FlowCollectorExporter), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorFLP)(nil), (*v1beta1.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta1.FlowCollectorFLP), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorHPA)(nil), (*v1beta1.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta1.FlowCollectorHPA), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorHPA)(nil), (*v1beta2.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta2.FlowCollectorHPA), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorHPA)(nil), (*FlowCollectorHPA)(nil), func(a, b interface{}, scope 
conversion.Scope) error { - return Convert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(a.(*v1beta1.FlowCollectorHPA), b.(*FlowCollectorHPA), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorHPA)(nil), (*FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(a.(*v1beta2.FlowCollectorHPA), b.(*FlowCollectorHPA), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIX)(nil), (*v1beta1.FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(a.(*FlowCollectorIPFIX), b.(*v1beta1.FlowCollectorIPFIX), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIX)(nil), (*v1beta2.FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(a.(*FlowCollectorIPFIX), b.(*v1beta2.FlowCollectorIPFIX), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorIPFIX)(nil), (*FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(a.(*v1beta1.FlowCollectorIPFIX), b.(*FlowCollectorIPFIX), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIX)(nil), (*FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(a.(*v1beta2.FlowCollectorIPFIX), b.(*FlowCollectorIPFIX), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorKafka)(nil), (*v1beta1.FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(a.(*FlowCollectorKafka), b.(*v1beta1.FlowCollectorKafka), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorKafka)(nil), (*v1beta2.FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(a.(*FlowCollectorKafka), b.(*v1beta2.FlowCollectorKafka), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorKafka)(nil), (*FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(a.(*v1beta1.FlowCollectorKafka), b.(*FlowCollectorKafka), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorKafka)(nil), (*FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(a.(*v1beta2.FlowCollectorKafka), b.(*FlowCollectorKafka), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorList)(nil), (*v1beta1.FlowCollectorList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorList_To_v1beta1_FlowCollectorList(a.(*FlowCollectorList), b.(*v1beta1.FlowCollectorList), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorList)(nil), (*v1beta2.FlowCollectorList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(a.(*FlowCollectorList), 
b.(*v1beta2.FlowCollectorList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorList)(nil), (*FlowCollectorList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorList_To_v1alpha1_FlowCollectorList(a.(*v1beta1.FlowCollectorList), b.(*FlowCollectorList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorList)(nil), (*FlowCollectorList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(a.(*v1beta2.FlowCollectorList), b.(*FlowCollectorList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorLoki)(nil), (*v1beta1.FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(a.(*FlowCollectorLoki), b.(*v1beta1.FlowCollectorLoki), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorSpec)(nil), (*v1beta2.FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(a.(*FlowCollectorSpec), b.(*v1beta2.FlowCollectorSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorSpec)(nil), (*v1beta1.FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(a.(*FlowCollectorSpec), b.(*v1beta1.FlowCollectorSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorSpec)(nil), (*FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(a.(*v1beta2.FlowCollectorSpec), b.(*FlowCollectorSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorSpec)(nil), (*FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(a.(*v1beta1.FlowCollectorSpec), b.(*FlowCollectorSpec), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorStatus)(nil), (*v1beta2.FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(a.(*FlowCollectorStatus), b.(*v1beta2.FlowCollectorStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorStatus)(nil), (*v1beta1.FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(a.(*FlowCollectorStatus), b.(*v1beta1.FlowCollectorStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorStatus)(nil), (*FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(a.(*v1beta2.FlowCollectorStatus), b.(*FlowCollectorStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorStatus)(nil), (*FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(a.(*v1beta1.FlowCollectorStatus), b.(*FlowCollectorStatus), scope) + if err := 
s.AddGeneratedConversionFunc((*MetricsServerConfig)(nil), (*v1beta2.MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(a.(*MetricsServerConfig), b.(*v1beta2.MetricsServerConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*MetricsServerConfig)(nil), (*v1beta1.MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_MetricsServerConfig_To_v1beta1_MetricsServerConfig(a.(*MetricsServerConfig), b.(*v1beta1.MetricsServerConfig), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.MetricsServerConfig)(nil), (*MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(a.(*v1beta2.MetricsServerConfig), b.(*MetricsServerConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.MetricsServerConfig)(nil), (*MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(a.(*v1beta1.MetricsServerConfig), b.(*MetricsServerConfig), scope) + if err := s.AddGeneratedConversionFunc((*OVNKubernetesConfig)(nil), (*v1beta2.OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(a.(*OVNKubernetesConfig), b.(*v1beta2.OVNKubernetesConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*OVNKubernetesConfig)(nil), (*v1beta1.OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(a.(*OVNKubernetesConfig), b.(*v1beta1.OVNKubernetesConfig), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.OVNKubernetesConfig)(nil), (*OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(a.(*v1beta2.OVNKubernetesConfig), b.(*OVNKubernetesConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.OVNKubernetesConfig)(nil), (*OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(a.(*v1beta1.OVNKubernetesConfig), b.(*OVNKubernetesConfig), scope) + if err := s.AddGeneratedConversionFunc((*QuickFilter)(nil), (*v1beta2.QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(a.(*QuickFilter), b.(*v1beta2.QuickFilter), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*QuickFilter)(nil), (*v1beta1.QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_QuickFilter_To_v1beta1_QuickFilter(a.(*QuickFilter), b.(*v1beta1.QuickFilter), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.QuickFilter)(nil), (*QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(a.(*v1beta2.QuickFilter), b.(*QuickFilter), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.QuickFilter)(nil), (*QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta1_QuickFilter_To_v1alpha1_QuickFilter(a.(*v1beta1.QuickFilter), b.(*QuickFilter), scope) + if err := s.AddGeneratedConversionFunc((*ServerTLS)(nil), (*v1beta2.ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(a.(*ServerTLS), b.(*v1beta2.ServerTLS), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ServerTLS)(nil), (*v1beta1.ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ServerTLS_To_v1beta1_ServerTLS(a.(*ServerTLS), b.(*v1beta1.ServerTLS), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ServerTLS)(nil), (*ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(a.(*v1beta2.ServerTLS), b.(*ServerTLS), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.FLPMetrics)(nil), (*FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FLPMetrics_To_v1alpha1_FLPMetrics(a.(*v1beta1.FLPMetrics), b.(*FLPMetrics), scope) + if err := s.AddConversionFunc((*FlowCollectorLoki)(nil), (*v1beta2.FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(a.(*FlowCollectorLoki), b.(*v1beta2.FlowCollectorLoki), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.FlowCollectorConsolePlugin)(nil), (*FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(a.(*v1beta1.FlowCollectorConsolePlugin), b.(*FlowCollectorConsolePlugin), scope) + if err := s.AddConversionFunc((*v1beta2.FLPMetrics)(nil), (*FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(a.(*v1beta2.FLPMetrics), b.(*FLPMetrics), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.FlowCollectorEBPF)(nil), (*FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(a.(*v1beta1.FlowCollectorEBPF), b.(*FlowCollectorEBPF), scope) + if err := s.AddConversionFunc((*v1beta2.FlowCollectorConsolePlugin)(nil), (*FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(a.(*v1beta2.FlowCollectorConsolePlugin), b.(*FlowCollectorConsolePlugin), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.FlowCollectorExporter)(nil), (*FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(a.(*v1beta1.FlowCollectorExporter), b.(*FlowCollectorExporter), scope) + if err := s.AddConversionFunc((*v1beta2.FlowCollectorEBPF)(nil), (*FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(a.(*v1beta2.FlowCollectorEBPF), b.(*FlowCollectorEBPF), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.FlowCollectorFLP)(nil), (*FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta1_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(a.(*v1beta1.FlowCollectorFLP), b.(*FlowCollectorFLP), scope) + if err := s.AddConversionFunc((*v1beta2.FlowCollectorExporter)(nil), (*FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(a.(*v1beta2.FlowCollectorExporter), b.(*FlowCollectorExporter), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.FlowCollectorLoki)(nil), (*FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(a.(*v1beta1.FlowCollectorLoki), b.(*FlowCollectorLoki), scope) + if err := s.AddConversionFunc((*v1beta2.FlowCollectorFLP)(nil), (*FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(a.(*v1beta2.FlowCollectorFLP), b.(*FlowCollectorFLP), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.FlowCollectorLoki)(nil), (*FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(a.(*v1beta2.FlowCollectorLoki), b.(*FlowCollectorLoki), scope) }); err != nil { return err } @@ -271,8 +276,8 @@ func RegisterConversions(s *runtime.Scheme) error { return nil } -func autoConvert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(in *CertificateReference, out *v1beta1.CertificateReference, s conversion.Scope) error { - out.Type = v1beta1.MountableType(in.Type) +func autoConvert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(in *CertificateReference, out *v1beta2.CertificateReference, s conversion.Scope) error { + out.Type = v1beta2.MountableType(in.Type) out.Name = in.Name out.CertFile = in.CertFile out.CertKey = in.CertKey @@ -280,12 +285,12 @@ func autoConvert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(i return nil } -// Convert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference is an autogenerated conversion function. -func Convert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(in *CertificateReference, out *v1beta1.CertificateReference, s conversion.Scope) error { - return autoConvert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(in, out, s) +// Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference is an autogenerated conversion function. +func Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(in *CertificateReference, out *v1beta2.CertificateReference, s conversion.Scope) error { + return autoConvert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(in, out, s) } -func autoConvert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference(in *v1beta1.CertificateReference, out *CertificateReference, s conversion.Scope) error { +func autoConvert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(in *v1beta2.CertificateReference, out *CertificateReference, s conversion.Scope) error { out.Type = MountableType(in.Type) out.Name = in.Name out.Namespace = in.Namespace @@ -294,66 +299,66 @@ func autoConvert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference(i return nil } -// Convert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference is an autogenerated conversion function. 
-func Convert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference(in *v1beta1.CertificateReference, out *CertificateReference, s conversion.Scope) error { - return autoConvert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference(in, out, s) +// Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference is an autogenerated conversion function. +func Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(in *v1beta2.CertificateReference, out *CertificateReference, s conversion.Scope) error { + return autoConvert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(in, out, s) } -func autoConvert_v1alpha1_ClientTLS_To_v1beta1_ClientTLS(in *ClientTLS, out *v1beta1.ClientTLS, s conversion.Scope) error { +func autoConvert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(in *ClientTLS, out *v1beta2.ClientTLS, s conversion.Scope) error { out.Enable = in.Enable out.InsecureSkipVerify = in.InsecureSkipVerify - if err := Convert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(&in.CACert, &out.CACert, s); err != nil { + if err := Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(&in.CACert, &out.CACert, s); err != nil { return err } - if err := Convert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { + if err := Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { return err } return nil } -// Convert_v1alpha1_ClientTLS_To_v1beta1_ClientTLS is an autogenerated conversion function. -func Convert_v1alpha1_ClientTLS_To_v1beta1_ClientTLS(in *ClientTLS, out *v1beta1.ClientTLS, s conversion.Scope) error { - return autoConvert_v1alpha1_ClientTLS_To_v1beta1_ClientTLS(in, out, s) +// Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS is an autogenerated conversion function. +func Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(in *ClientTLS, out *v1beta2.ClientTLS, s conversion.Scope) error { + return autoConvert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(in, out, s) } -func autoConvert_v1beta1_ClientTLS_To_v1alpha1_ClientTLS(in *v1beta1.ClientTLS, out *ClientTLS, s conversion.Scope) error { +func autoConvert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(in *v1beta2.ClientTLS, out *ClientTLS, s conversion.Scope) error { out.Enable = in.Enable out.InsecureSkipVerify = in.InsecureSkipVerify - if err := Convert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference(&in.CACert, &out.CACert, s); err != nil { + if err := Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(&in.CACert, &out.CACert, s); err != nil { return err } - if err := Convert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { + if err := Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { return err } return nil } -// Convert_v1beta1_ClientTLS_To_v1alpha1_ClientTLS is an autogenerated conversion function. -func Convert_v1beta1_ClientTLS_To_v1alpha1_ClientTLS(in *v1beta1.ClientTLS, out *ClientTLS, s conversion.Scope) error { - return autoConvert_v1beta1_ClientTLS_To_v1alpha1_ClientTLS(in, out, s) +// Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS is an autogenerated conversion function. 
+func Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(in *v1beta2.ClientTLS, out *ClientTLS, s conversion.Scope) error { + return autoConvert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(in, out, s) } -func autoConvert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta1.ClusterNetworkOperatorConfig, s conversion.Scope) error { +func autoConvert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta2.ClusterNetworkOperatorConfig, s conversion.Scope) error { out.Namespace = in.Namespace return nil } -// Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig is an autogenerated conversion function. -func Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta1.ClusterNetworkOperatorConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(in, out, s) +// Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig is an autogenerated conversion function. +func Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta2.ClusterNetworkOperatorConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in, out, s) } -func autoConvert_v1beta1_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in *v1beta1.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { +func autoConvert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in *v1beta2.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { out.Namespace = in.Namespace return nil } -// Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig is an autogenerated conversion function. -func Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in *v1beta1.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in, out, s) +// Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig is an autogenerated conversion function. +func Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in *v1beta2.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { + return autoConvert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in, out, s) } -func autoConvert_v1alpha1_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta1.ConsolePluginPortConfig, s conversion.Scope) error { +func autoConvert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta2.ConsolePluginPortConfig, s conversion.Scope) error { if err := v1.Convert_bool_To_Pointer_bool(&in.Enable, &out.Enable, s); err != nil { return err } @@ -361,12 +366,12 @@ func autoConvert_v1alpha1_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortCo return nil } -// Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig is an autogenerated conversion function. 
-func Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta1.ConsolePluginPortConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(in, out, s) +// Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig is an autogenerated conversion function. +func Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta2.ConsolePluginPortConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in, out, s) } -func autoConvert_v1beta1_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in *v1beta1.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { +func autoConvert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in *v1beta2.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { if err := v1.Convert_Pointer_bool_To_bool(&in.Enable, &out.Enable, s); err != nil { return err } @@ -374,46 +379,46 @@ func autoConvert_v1beta1_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortCo return nil } -// Convert_v1beta1_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig is an autogenerated conversion function. -func Convert_v1beta1_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in *v1beta1.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { - return autoConvert_v1beta1_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in, out, s) +// Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig is an autogenerated conversion function. +func Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in *v1beta2.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { + return autoConvert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in, out, s) } -func autoConvert_v1alpha1_DebugConfig_To_v1beta1_DebugConfig(in *DebugConfig, out *v1beta1.DebugConfig, s conversion.Scope) error { +func autoConvert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(in *DebugConfig, out *v1beta2.DebugConfig, s conversion.Scope) error { out.Env = *(*map[string]string)(unsafe.Pointer(&in.Env)) return nil } -// Convert_v1alpha1_DebugConfig_To_v1beta1_DebugConfig is an autogenerated conversion function. -func Convert_v1alpha1_DebugConfig_To_v1beta1_DebugConfig(in *DebugConfig, out *v1beta1.DebugConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_DebugConfig_To_v1beta1_DebugConfig(in, out, s) +// Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig is an autogenerated conversion function. +func Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(in *DebugConfig, out *v1beta2.DebugConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(in, out, s) } -func autoConvert_v1beta1_DebugConfig_To_v1alpha1_DebugConfig(in *v1beta1.DebugConfig, out *DebugConfig, s conversion.Scope) error { +func autoConvert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(in *v1beta2.DebugConfig, out *DebugConfig, s conversion.Scope) error { out.Env = *(*map[string]string)(unsafe.Pointer(&in.Env)) return nil } -// Convert_v1beta1_DebugConfig_To_v1alpha1_DebugConfig is an autogenerated conversion function. 
-func Convert_v1beta1_DebugConfig_To_v1alpha1_DebugConfig(in *v1beta1.DebugConfig, out *DebugConfig, s conversion.Scope) error { - return autoConvert_v1beta1_DebugConfig_To_v1alpha1_DebugConfig(in, out, s) +// Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig is an autogenerated conversion function. +func Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(in *v1beta2.DebugConfig, out *DebugConfig, s conversion.Scope) error { + return autoConvert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(in, out, s) } -func autoConvert_v1alpha1_FLPMetrics_To_v1beta1_FLPMetrics(in *FLPMetrics, out *v1beta1.FLPMetrics, s conversion.Scope) error { - if err := Convert_v1alpha1_MetricsServerConfig_To_v1beta1_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { +func autoConvert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s conversion.Scope) error { + if err := Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { return err } out.IgnoreTags = *(*[]string)(unsafe.Pointer(&in.IgnoreTags)) return nil } -// Convert_v1alpha1_FLPMetrics_To_v1beta1_FLPMetrics is an autogenerated conversion function. -func Convert_v1alpha1_FLPMetrics_To_v1beta1_FLPMetrics(in *FLPMetrics, out *v1beta1.FLPMetrics, s conversion.Scope) error { - return autoConvert_v1alpha1_FLPMetrics_To_v1beta1_FLPMetrics(in, out, s) +// Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics is an autogenerated conversion function. +func Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s conversion.Scope) error { + return autoConvert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in, out, s) } -func autoConvert_v1beta1_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta1.FLPMetrics, out *FLPMetrics, s conversion.Scope) error { - if err := Convert_v1beta1_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { +func autoConvert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s conversion.Scope) error { + if err := Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { return err } out.IgnoreTags = *(*[]string)(unsafe.Pointer(&in.IgnoreTags)) @@ -421,71 +426,71 @@ func autoConvert_v1beta1_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta1.FLPMetric return nil } -func autoConvert_v1alpha1_FlowCollector_To_v1beta1_FlowCollector(in *FlowCollector, out *v1beta1.FlowCollector, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(in *FlowCollector, out *v1beta2.FlowCollector, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha1_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha1_FlowCollector_To_v1beta1_FlowCollector is an autogenerated conversion function. 
-func Convert_v1alpha1_FlowCollector_To_v1beta1_FlowCollector(in *FlowCollector, out *v1beta1.FlowCollector, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollector_To_v1beta1_FlowCollector(in, out, s) +// Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector is an autogenerated conversion function. +func Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(in *FlowCollector, out *v1beta2.FlowCollector, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(in, out, s) } -func autoConvert_v1beta1_FlowCollector_To_v1alpha1_FlowCollector(in *v1beta1.FlowCollector, out *FlowCollector, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(in *v1beta2.FlowCollector, out *FlowCollector, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta1_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1beta1_FlowCollector_To_v1alpha1_FlowCollector is an autogenerated conversion function. -func Convert_v1beta1_FlowCollector_To_v1alpha1_FlowCollector(in *v1beta1.FlowCollector, out *FlowCollector, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollector_To_v1alpha1_FlowCollector(in, out, s) +// Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector is an autogenerated conversion function. +func Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(in *v1beta2.FlowCollector, out *FlowCollector, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta1.FlowCollectorAgent, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { out.Type = in.Type - if err := Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { return err } - if err := Convert_v1alpha1_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { return err } return nil } -// Convert_v1alpha1_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta1.FlowCollectorAgent, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in, out, s) +// Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent is an autogenerated conversion function. 
+func Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in, out, s) } -func autoConvert_v1beta1_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta1.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { out.Type = in.Type - if err := Convert_v1beta1_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { return err } - if err := Convert_v1beta1_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { + if err := Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { return err } return nil } -// Convert_v1beta1_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta1.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in, out, s) +// Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta1.FlowCollectorConsolePlugin, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error { if err := v1.Convert_bool_To_Pointer_bool(&in.Register, &out.Register, s); err != nil { return err } @@ -496,22 +501,22 @@ func autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorCon out.ImagePullPolicy = in.ImagePullPolicy out.Resources = in.Resources out.LogLevel = in.LogLevel - if err := Convert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { return err } - if err := Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { + if err := Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { return err } - out.QuickFilters = *(*[]v1beta1.QuickFilter)(unsafe.Pointer(&in.QuickFilters)) + out.QuickFilters = *(*[]v1beta2.QuickFilter)(unsafe.Pointer(&in.QuickFilters)) return nil } -// Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin is an autogenerated conversion function. 
-func Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta1.FlowCollectorConsolePlugin, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in, out, s) +// Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin is an autogenerated conversion function. +func Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in, out, s) } -func autoConvert_v1beta1_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in *v1beta1.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s conversion.Scope) error { // WARNING: in.Enable requires manual conversion: does not exist in peer-type if err := v1.Convert_Pointer_bool_To_bool(&in.Register, &out.Register, s); err != nil { return err @@ -523,17 +528,17 @@ func autoConvert_v1beta1_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorCon out.ImagePullPolicy = in.ImagePullPolicy out.Resources = in.Resources out.LogLevel = in.LogLevel - if err := Convert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { + if err := Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { return err } - if err := Convert_v1beta1_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { + if err := Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { return err } out.QuickFilters = *(*[]QuickFilter)(unsafe.Pointer(&in.QuickFilters)) return nil } -func autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta1.FlowCollectorEBPF, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { out.ImagePullPolicy = in.ImagePullPolicy out.Resources = in.Resources out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) @@ -544,18 +549,18 @@ func autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *Flo out.LogLevel = in.LogLevel out.Privileged = in.Privileged out.KafkaBatchSize = in.KafkaBatchSize - if err := Convert_v1alpha1_DebugConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + if err := Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { return err } return nil } -// Convert_v1alpha1_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta1.FlowCollectorEBPF, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in, out, s) +// Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF is an autogenerated conversion function. 
+func Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in, out, s) } -func autoConvert_v1beta1_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta1.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { out.ImagePullPolicy = in.ImagePullPolicy out.Resources = in.Resources out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) @@ -566,41 +571,41 @@ func autoConvert_v1beta1_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1b out.LogLevel = in.LogLevel out.Privileged = in.Privileged out.KafkaBatchSize = in.KafkaBatchSize - if err := Convert_v1beta1_DebugConfig_To_v1alpha1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + if err := Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { return err } // WARNING: in.Features requires manual conversion: does not exist in peer-type return nil } -func autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta1.FlowCollectorExporter, s conversion.Scope) error { - out.Type = v1beta1.ExporterType(in.Type) - if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { +func autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { + out.Type = v1beta2.ExporterType(in.Type) + if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } return nil } -// Convert_v1alpha1_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta1.FlowCollectorExporter, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in, out, s) +// Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter is an autogenerated conversion function. 
+func Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in, out, s) } -func autoConvert_v1beta1_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta1.FlowCollectorExporter, out *FlowCollectorExporter, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s conversion.Scope) error { out.Type = ExporterType(in.Type) - if err := Convert_v1beta1_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { + if err := Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } // WARNING: in.IPFIX requires manual conversion: does not exist in peer-type return nil } -func autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta1.FlowCollectorFLP, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error { out.Port = in.Port out.HealthPort = in.HealthPort out.ProfilePort = in.ProfilePort out.ImagePullPolicy = in.ImagePullPolicy - if err := Convert_v1alpha1_FLPMetrics_To_v1beta1_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { + if err := Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { return err } out.LogLevel = in.LogLevel @@ -614,28 +619,28 @@ func autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *FlowC if err := v1.Convert_int32_To_Pointer_int32(&in.KafkaConsumerReplicas, &out.KafkaConsumerReplicas, s); err != nil { return err } - if err := Convert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { return err } out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity out.KafkaConsumerBatchSize = in.KafkaConsumerBatchSize - if err := Convert_v1alpha1_DebugConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + if err := Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { return err } return nil } -// Convert_v1alpha1_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta1.FlowCollectorFLP, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in, out, s) +// Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP is an autogenerated conversion function. 
+func Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in, out, s) } -func autoConvert_v1beta1_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1beta1.FlowCollectorFLP, out *FlowCollectorFLP, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s conversion.Scope) error { out.Port = in.Port out.HealthPort = in.HealthPort out.ProfilePort = in.ProfilePort out.ImagePullPolicy = in.ImagePullPolicy - if err := Convert_v1beta1_FLPMetrics_To_v1alpha1_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { + if err := Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { return err } out.LogLevel = in.LogLevel @@ -649,7 +654,7 @@ func autoConvert_v1beta1_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1bet if err := v1.Convert_Pointer_int32_To_int32(&in.KafkaConsumerReplicas, &out.KafkaConsumerReplicas, s); err != nil { return err } - if err := Convert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { + if err := Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { return err } out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity @@ -665,7 +670,7 @@ func autoConvert_v1beta1_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1bet return nil } -func autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta1.FlowCollectorHPA, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s conversion.Scope) error { out.Status = in.Status out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) out.MaxReplicas = in.MaxReplicas @@ -673,12 +678,12 @@ func autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *FlowC return nil } -// Convert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta1.FlowCollectorHPA, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in, out, s) +// Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA is an autogenerated conversion function. +func Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in, out, s) } -func autoConvert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta1.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { out.Status = in.Status out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) out.MaxReplicas = in.MaxReplicas @@ -686,85 +691,85 @@ func autoConvert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1bet return nil } -// Convert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA is an autogenerated conversion function. 
-func Convert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta1.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in, out, s) +// Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta1.FlowCollectorIPFIX, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { out.CacheActiveTimeout = in.CacheActiveTimeout out.CacheMaxFlows = in.CacheMaxFlows out.Sampling = in.Sampling out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { + if err := Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { return err } - if err := Convert_v1alpha1_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { + if err := Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { return err } return nil } -// Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta1.FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in, out, s) +// Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX is an autogenerated conversion function. 
+func Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in, out, s) } -func autoConvert_v1beta1_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta1.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { out.CacheActiveTimeout = in.CacheActiveTimeout out.CacheMaxFlows = in.CacheMaxFlows out.Sampling = in.Sampling out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { + if err := Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { return err } - if err := Convert_v1beta1_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { + if err := Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { return err } return nil } -// Convert_v1beta1_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta1.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in, out, s) +// Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta1.FlowCollectorKafka, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { out.Address = in.Address out.Topic = in.Topic - if err := Convert_v1alpha1_ClientTLS_To_v1beta1_ClientTLS(&in.TLS, &out.TLS, s); err != nil { + if err := Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(&in.TLS, &out.TLS, s); err != nil { return err } return nil } -// Convert_v1alpha1_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta1.FlowCollectorKafka, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(in, out, s) +// Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka is an autogenerated conversion function. 
+func Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in, out, s) } -func autoConvert_v1beta1_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in *v1beta1.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in *v1beta2.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { out.Address = in.Address out.Topic = in.Topic - if err := Convert_v1beta1_ClientTLS_To_v1alpha1_ClientTLS(&in.TLS, &out.TLS, s); err != nil { + if err := Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(&in.TLS, &out.TLS, s); err != nil { return err } // INFO: in.SASL opted out of conversion generation return nil } -// Convert_v1beta1_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in *v1beta1.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in, out, s) +// Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in *v1beta2.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorList_To_v1beta1_FlowCollectorList(in *FlowCollectorList, out *v1beta1.FlowCollectorList, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(in *FlowCollectorList, out *v1beta2.FlowCollectorList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]v1beta1.FlowCollector, len(*in)) + *out = make([]v1beta2.FlowCollector, len(*in)) for i := range *in { - if err := Convert_v1alpha1_FlowCollector_To_v1beta1_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -774,18 +779,18 @@ func autoConvert_v1alpha1_FlowCollectorList_To_v1beta1_FlowCollectorList(in *Flo return nil } -// Convert_v1alpha1_FlowCollectorList_To_v1beta1_FlowCollectorList is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorList_To_v1beta1_FlowCollectorList(in *FlowCollectorList, out *v1beta1.FlowCollectorList, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorList_To_v1beta1_FlowCollectorList(in, out, s) +// Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList is an autogenerated conversion function. 
+func Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(in *FlowCollectorList, out *v1beta2.FlowCollectorList, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(in, out, s) } -func autoConvert_v1beta1_FlowCollectorList_To_v1alpha1_FlowCollectorList(in *v1beta1.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(in *v1beta2.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]FlowCollector, len(*in)) for i := range *in { - if err := Convert_v1beta1_FlowCollector_To_v1alpha1_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -795,17 +800,17 @@ func autoConvert_v1beta1_FlowCollectorList_To_v1alpha1_FlowCollectorList(in *v1b return nil } -// Convert_v1beta1_FlowCollectorList_To_v1alpha1_FlowCollectorList is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorList_To_v1alpha1_FlowCollectorList(in *v1beta1.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorList_To_v1alpha1_FlowCollectorList(in, out, s) +// Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(in *v1beta2.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta1.FlowCollectorLoki, s conversion.Scope) error { - out.URL = in.URL - out.QuerierURL = in.QuerierURL - out.StatusURL = in.StatusURL - out.TenantID = in.TenantID - out.AuthToken = in.AuthToken +func autoConvert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta2.FlowCollectorLoki, s conversion.Scope) error { + // WARNING: in.URL requires manual conversion: does not exist in peer-type + // WARNING: in.QuerierURL requires manual conversion: does not exist in peer-type + // WARNING: in.StatusURL requires manual conversion: does not exist in peer-type + // WARNING: in.TenantID requires manual conversion: does not exist in peer-type + // WARNING: in.AuthToken requires manual conversion: does not exist in peer-type if err := v1.Convert_v1_Duration_To_Pointer_v1_Duration(&in.BatchWait, &out.BatchWait, s); err != nil { return err } @@ -823,24 +828,15 @@ func autoConvert_v1alpha1_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in *Flo return err } out.StaticLabels = *(*map[string]string)(unsafe.Pointer(&in.StaticLabels)) - if err := Convert_v1alpha1_ClientTLS_To_v1beta1_ClientTLS(&in.TLS, &out.TLS, s); err != nil { - return err - } + // WARNING: in.TLS requires manual conversion: does not exist in peer-type return nil } -// Convert_v1alpha1_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki is an autogenerated conversion function. 
-func Convert_v1alpha1_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta1.FlowCollectorLoki, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in, out, s) -} - -func autoConvert_v1beta1_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1beta1.FlowCollectorLoki, out *FlowCollectorLoki, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1beta2.FlowCollectorLoki, out *FlowCollectorLoki, s conversion.Scope) error { + // WARNING: in.Mode requires manual conversion: does not exist in peer-type + // WARNING: in.Manual requires manual conversion: does not exist in peer-type + // WARNING: in.LokiStack requires manual conversion: does not exist in peer-type // WARNING: in.Enable requires manual conversion: does not exist in peer-type - out.URL = in.URL - out.QuerierURL = in.QuerierURL - out.StatusURL = in.StatusURL - out.TenantID = in.TenantID - out.AuthToken = in.AuthToken if err := v1.Convert_Pointer_v1_Duration_To_v1_Duration(&in.BatchWait, &out.BatchWait, s); err != nil { return err } @@ -858,178 +854,178 @@ func autoConvert_v1beta1_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1b return err } out.StaticLabels = *(*map[string]string)(unsafe.Pointer(&in.StaticLabels)) - if err := Convert_v1beta1_ClientTLS_To_v1alpha1_ClientTLS(&in.TLS, &out.TLS, s); err != nil { - return err - } - // WARNING: in.StatusTLS requires manual conversion: does not exist in peer-type return nil } -func autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta1.FlowCollectorSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s conversion.Scope) error { out.Namespace = in.Namespace - if err := Convert_v1alpha1_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { return err } - if err := Convert_v1alpha1_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(&in.Loki, &out.Loki, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { return err } - if err := Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { + // INFO: in.Loki opted out of conversion generation + if err := Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { return err } out.DeploymentModel = in.DeploymentModel - if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } // INFO: in.Exporters opted out of conversion generation return nil } -// Convert_v1alpha1_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec is an autogenerated conversion function. 
-func Convert_v1alpha1_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta1.FlowCollectorSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in, out, s) +// Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec is an autogenerated conversion function. +func Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in, out, s) } -func autoConvert_v1beta1_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1beta1.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { out.Namespace = in.Namespace - if err := Convert_v1beta1_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { + if err := Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { return err } - if err := Convert_v1beta1_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { + if err := Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { return err } - if err := Convert_v1beta1_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(&in.Loki, &out.Loki, s); err != nil { - return err - } - if err := Convert_v1beta1_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { + // INFO: in.Loki opted out of conversion generation + if err := Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { return err } out.DeploymentModel = in.DeploymentModel - if err := Convert_v1beta1_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { + if err := Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } // INFO: in.Exporters opted out of conversion generation return nil } -// Convert_v1beta1_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1beta1.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in, out, s) +// Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta1.FlowCollectorStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta2.FlowCollectorStatus, s conversion.Scope) error { out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) out.Namespace = in.Namespace return nil } -// Convert_v1alpha1_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta1.FlowCollectorStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(in, out, s) +// Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus is an autogenerated conversion function. +func Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta2.FlowCollectorStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in, out, s) } -func autoConvert_v1beta1_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in *v1beta1.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in *v1beta2.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) out.Namespace = in.Namespace return nil } -// Convert_v1beta1_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in *v1beta1.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in, out, s) +// Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in *v1beta2.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in, out, s) } -func autoConvert_v1alpha1_MetricsServerConfig_To_v1beta1_MetricsServerConfig(in *MetricsServerConfig, out *v1beta1.MetricsServerConfig, s conversion.Scope) error { +func autoConvert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in *MetricsServerConfig, out *v1beta2.MetricsServerConfig, s conversion.Scope) error { out.Port = in.Port - if err := Convert_v1alpha1_ServerTLS_To_v1beta1_ServerTLS(&in.TLS, &out.TLS, s); err != nil { + if err := Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(&in.TLS, &out.TLS, s); err != nil { return err } return nil } -// Convert_v1alpha1_MetricsServerConfig_To_v1beta1_MetricsServerConfig is an autogenerated conversion function. 
-func Convert_v1alpha1_MetricsServerConfig_To_v1beta1_MetricsServerConfig(in *MetricsServerConfig, out *v1beta1.MetricsServerConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_MetricsServerConfig_To_v1beta1_MetricsServerConfig(in, out, s) +// Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig is an autogenerated conversion function. +func Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in *MetricsServerConfig, out *v1beta2.MetricsServerConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in, out, s) } -func autoConvert_v1beta1_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in *v1beta1.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { +func autoConvert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in *v1beta2.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { out.Port = in.Port - if err := Convert_v1beta1_ServerTLS_To_v1alpha1_ServerTLS(&in.TLS, &out.TLS, s); err != nil { + if err := Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(&in.TLS, &out.TLS, s); err != nil { return err } return nil } -// Convert_v1beta1_MetricsServerConfig_To_v1alpha1_MetricsServerConfig is an autogenerated conversion function. -func Convert_v1beta1_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in *v1beta1.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { - return autoConvert_v1beta1_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in, out, s) +// Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig is an autogenerated conversion function. +func Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in *v1beta2.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { + return autoConvert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in, out, s) } -func autoConvert_v1alpha1_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta1.OVNKubernetesConfig, s conversion.Scope) error { +func autoConvert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta2.OVNKubernetesConfig, s conversion.Scope) error { out.Namespace = in.Namespace out.DaemonSetName = in.DaemonSetName out.ContainerName = in.ContainerName return nil } -// Convert_v1alpha1_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig is an autogenerated conversion function. -func Convert_v1alpha1_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta1.OVNKubernetesConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(in, out, s) +// Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig is an autogenerated conversion function. 
+func Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta2.OVNKubernetesConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in, out, s) } -func autoConvert_v1beta1_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in *v1beta1.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { +func autoConvert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in *v1beta2.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { out.Namespace = in.Namespace out.DaemonSetName = in.DaemonSetName out.ContainerName = in.ContainerName return nil } -// Convert_v1beta1_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig is an autogenerated conversion function. -func Convert_v1beta1_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in *v1beta1.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { - return autoConvert_v1beta1_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in, out, s) +// Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig is an autogenerated conversion function. +func Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in *v1beta2.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { + return autoConvert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in, out, s) } -func autoConvert_v1alpha1_QuickFilter_To_v1beta1_QuickFilter(in *QuickFilter, out *v1beta1.QuickFilter, s conversion.Scope) error { +func autoConvert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(in *QuickFilter, out *v1beta2.QuickFilter, s conversion.Scope) error { out.Name = in.Name out.Filter = *(*map[string]string)(unsafe.Pointer(&in.Filter)) out.Default = in.Default return nil } -// Convert_v1alpha1_QuickFilter_To_v1beta1_QuickFilter is an autogenerated conversion function. -func Convert_v1alpha1_QuickFilter_To_v1beta1_QuickFilter(in *QuickFilter, out *v1beta1.QuickFilter, s conversion.Scope) error { - return autoConvert_v1alpha1_QuickFilter_To_v1beta1_QuickFilter(in, out, s) +// Convert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter is an autogenerated conversion function. +func Convert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(in *QuickFilter, out *v1beta2.QuickFilter, s conversion.Scope) error { + return autoConvert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(in, out, s) } -func autoConvert_v1beta1_QuickFilter_To_v1alpha1_QuickFilter(in *v1beta1.QuickFilter, out *QuickFilter, s conversion.Scope) error { +func autoConvert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(in *v1beta2.QuickFilter, out *QuickFilter, s conversion.Scope) error { out.Name = in.Name out.Filter = *(*map[string]string)(unsafe.Pointer(&in.Filter)) out.Default = in.Default return nil } -// Convert_v1beta1_QuickFilter_To_v1alpha1_QuickFilter is an autogenerated conversion function. -func Convert_v1beta1_QuickFilter_To_v1alpha1_QuickFilter(in *v1beta1.QuickFilter, out *QuickFilter, s conversion.Scope) error { - return autoConvert_v1beta1_QuickFilter_To_v1alpha1_QuickFilter(in, out, s) +// Convert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter is an autogenerated conversion function. 
+func Convert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(in *v1beta2.QuickFilter, out *QuickFilter, s conversion.Scope) error {
+ return autoConvert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(in, out, s)
}

-func autoConvert_v1alpha1_ServerTLS_To_v1beta1_ServerTLS(in *ServerTLS, out *v1beta1.ServerTLS, s conversion.Scope) error {
- out.Type = v1beta1.ServerTLSConfigType(in.Type)
- out.Provided = (*v1beta1.CertificateReference)(unsafe.Pointer(in.Provided))
+func autoConvert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s conversion.Scope) error {
+ out.Type = v1beta2.ServerTLSConfigType(in.Type)
+ out.Provided = (*v1beta2.CertificateReference)(unsafe.Pointer(in.Provided))
 return nil
 }

-// Convert_v1alpha1_ServerTLS_To_v1beta1_ServerTLS is an autogenerated conversion function.
-func Convert_v1alpha1_ServerTLS_To_v1beta1_ServerTLS(in *ServerTLS, out *v1beta1.ServerTLS, s conversion.Scope) error {
- return autoConvert_v1alpha1_ServerTLS_To_v1beta1_ServerTLS(in, out, s)
+// Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS is an autogenerated conversion function.
+func Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in, out, s)
 }

-func autoConvert_v1beta1_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta1.ServerTLS, out *ServerTLS, s conversion.Scope) error {
+func autoConvert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s conversion.Scope) error {
 out.Type = ServerTLSConfigType(in.Type)
 out.Provided = (*CertificateReference)(unsafe.Pointer(in.Provided))
 // WARNING: in.InsecureSkipVerify requires manual conversion: does not exist in peer-type
 // WARNING: in.ProvidedCaFile requires manual conversion: does not exist in peer-type
 return nil
}
diff --git a/api/v1beta1/doc.go b/api/v1beta1/doc.go
index d7e24aa1a..4f23c7ff0 100644
--- a/api/v1beta1/doc.go
+++ b/api/v1beta1/doc.go
@@ -12,4 +12,5 @@ limitations under the License.
 */

 // Package v1beta1 contains the v1beta1 API implementation.
+// +k8s:conversion-gen=github.com/netobserv/network-observability-operator/api/v1beta2
 package v1beta1
diff --git a/api/v1beta1/flowcollector_types.go b/api/v1beta1/flowcollector_types.go
index 8ce4ca8fe..8817ded47 100644
--- a/api/v1beta1/flowcollector_types.go
+++ b/api/v1beta1/flowcollector_types.go
@@ -57,7 +57,8 @@ type FlowCollectorSpec struct {
 // enriches them, generates metrics, and forwards them to the Loki persistence layer and/or any available exporter.
 Processor FlowCollectorFLP `json:"processor,omitempty"`

- // Loki, the flow store, client settings.
+ // loki, the flow store, client settings.
+ // +k8s:conversion-gen=false
 Loki FlowCollectorLoki `json:"loki,omitempty"`

 // `consolePlugin` defines the settings related to the OpenShift Console plugin, when available.
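The `+k8s:conversion-gen=false` tag above removes `spec.loki` from generated conversions, so the version bridge for Loki is hand-written in flowcollector_webhook.go further down. A minimal sketch of that bridge, assuming the new `Mode`/`Manual` layout this series introduces (the mode value and the `Manual.*` field names are illustrative assumptions, not code taken from this patch):

import "github.com/netobserv/network-observability-operator/api/v1beta2"

// convertLokiToV1beta2 is a hypothetical helper in package v1beta1: legacy
// flat Loki settings map onto the new "manual" mode of the v1beta2 API.
func convertLokiToV1beta2(in *FlowCollectorLoki, out *v1beta2.FlowCollectorLoki) {
	out.Mode = "manual"             // assumed discriminator between manual and LokiStack setups
	out.Manual.IngesterURL = in.URL // assumed: the flat v1beta1 URL moves under .Manual
	out.Manual.TenantID = in.TenantID
	out.Manual.AuthToken = in.AuthToken
}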
@@ -848,7 +849,6 @@ type FlowCollectorStatus struct { // +kubebuilder:printcolumn:name="Sampling (EBPF)",type="string",JSONPath=`.spec.agent.ebpf.sampling` // +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel` // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[*].reason" -// +kubebuilder:storageversion // `FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments. type FlowCollector struct { diff --git a/api/v1beta1/flowcollector_webhook.go b/api/v1beta1/flowcollector_webhook.go index feda7f3f2..525ce3bfd 100644 --- a/api/v1beta1/flowcollector_webhook.go +++ b/api/v1beta1/flowcollector_webhook.go @@ -16,17 +16,100 @@ limitations under the License. package v1beta1 -import ctrl "sigs.k8s.io/controller-runtime" +import ( + "fmt" -// +kubebuilder:webhook:verbs=create;update,path=/validate-netobserv-io-v1beta1-flowcollector,mutating=false,failurePolicy=fail,groups=netobserv.io,resources=flowcollectors,versions=v1beta1,name=flowcollectorconversionwebhook.netobserv.io,sideEffects=None,admissionReviewVersions=v1 -func (r *FlowCollector) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(r). - Complete() + "github.com/netobserv/network-observability-operator/api/v1beta2" + utilconversion "github.com/netobserv/network-observability-operator/pkg/conversion" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apiconversion "k8s.io/apimachinery/pkg/conversion" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this v1beta1 FlowCollector to its v1beta2 equivalent (the conversion Hub) +// https://book.kubebuilder.io/multiversion-tutorial/conversion.html +func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*v1beta2.FlowCollector) + + if err := Convert_v1beta1_FlowCollector_To_v1beta2_FlowCollector(r, dst, nil); err != nil { + return fmt.Errorf("copying v1beta1.FlowCollector into v1beta2.FlowCollector: %w", err) + } + dst.Status.Conditions = make([]v1.Condition, len(r.Status.Conditions)) + copy(dst.Status.Conditions, r.Status.Conditions) + + // Manually restore data. 
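+ // Fields that exist only in v1beta2 were stashed in an annotation by
+ // MarshalData during an earlier down-conversion (see ConvertFrom below);
+ // UnmarshalData reads them back so a round-trip through v1beta1 is lossless.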
+ restored := &v1beta2.FlowCollector{}
+ if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
+ return err
+ }
+
+ dst.Spec.Processor.LogTypes = restored.Spec.Processor.LogTypes
+
+ if restored.Spec.Processor.ConversationHeartbeatInterval != nil {
+ dst.Spec.Processor.ConversationHeartbeatInterval = restored.Spec.Processor.ConversationHeartbeatInterval
+ }
+
+ if restored.Spec.Processor.ConversationEndTimeout != nil {
+ dst.Spec.Processor.ConversationEndTimeout = restored.Spec.Processor.ConversationEndTimeout
+ }
+
+ if restored.Spec.Processor.Metrics.DisableAlerts != nil {
+ dst.Spec.Processor.Metrics.DisableAlerts = restored.Spec.Processor.Metrics.DisableAlerts
+ }
+
+ dst.Spec.Loki.Manual = restored.Spec.Loki.Manual
+
+ return nil
+}
+
+// ConvertFrom converts the hub version v1beta2 FlowCollector object to v1beta1
+func (r *FlowCollector) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*v1beta2.FlowCollector)
+
+ if err := Convert_v1beta2_FlowCollector_To_v1beta1_FlowCollector(src, r, nil); err != nil {
+ return fmt.Errorf("copying v1beta2.FlowCollector into v1beta1.FlowCollector: %w", err)
+ }
+ r.Status.Conditions = make([]v1.Condition, len(src.Status.Conditions))
+ copy(r.Status.Conditions, src.Status.Conditions)
+
+ // Preserve Hub data on down-conversion except for metadata
+ return utilconversion.MarshalData(src, r)
+}
+
+func (r *FlowCollectorList) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*v1beta2.FlowCollectorList)
+ return Convert_v1beta1_FlowCollectorList_To_v1beta2_FlowCollectorList(r, dst, nil)
+}
+
+func (r *FlowCollectorList) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*v1beta2.FlowCollectorList)
+ return Convert_v1beta2_FlowCollectorList_To_v1beta1_FlowCollectorList(src, r, nil)
+}
+
+// This function needs to be created manually because conversion-gen intentionally
+// does not generate it: v1beta2 defines new fields that do not exist in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s apiconversion.Scope) error {
+ return autoConvert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in, out, s)
+}
+
+// This function needs to be created manually because conversion-gen intentionally
+// does not generate it: v1beta2 defines new fields that do not exist in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s apiconversion.Scope) error {
+ return autoConvert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in, out, s)
}

-// Hub marks this version as a conversion hub.
-// All the other version need to provide converters from/to this version.
-// https://book.kubebuilder.io/multiversion-tutorial/conversion-concepts.html
-func (*FlowCollector) Hub() {}
-func (*FlowCollectorList) Hub() {}
+// This function needs to be created manually because conversion-gen intentionally
+// does not generate it: v1beta2 defines new fields that do not exist in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in *v1beta2.FlowCollectorLoki, out *FlowCollectorLoki, s apiconversion.Scope) error {
+ return autoConvert_v1beta2_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in, out, s)
+}
+
+// This function needs to be created manually because conversion-gen intentionally
+// does not generate it: v1beta2 defines new fields that do not exist in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta2.FlowCollectorLoki, s apiconversion.Scope) error {
+ return autoConvert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in, out, s)
+}
diff --git a/api/v1beta1/groupversion_info.go b/api/v1beta1/groupversion_info.go
index 64920b249..20d0b792d 100644
--- a/api/v1beta1/groupversion_info.go
+++ b/api/v1beta1/groupversion_info.go
@@ -32,5 +32,6 @@ var (
 SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

 // AddToScheme adds the types in this group-version to the given scheme.
- AddToScheme = SchemeBuilder.AddToScheme
+ AddToScheme        = SchemeBuilder.AddToScheme
+ localSchemeBuilder = SchemeBuilder.SchemeBuilder
 )
diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go
new file mode 100644
index 000000000..005e9a27c
--- /dev/null
+++ b/api/v1beta1/zz_generated.conversion.go
@@ -0,0 +1,1116 @@
+//go:build !ignore_autogenerated_core
+// +build !ignore_autogenerated_core
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ unsafe "unsafe"
+
+ v1beta2 "github.com/netobserv/network-observability-operator/api/v1beta2"
+ v2 "k8s.io/api/autoscaling/v2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
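+//
+// Once a scheme is built this way, objects convert between versions through
+// it; for example (a sketch using only identifiers from this package):
+//
+//	scheme := runtime.NewScheme()
+//	_ = AddToScheme(scheme) // the init() above hooks in RegisterConversions
+//	_ = scheme.Convert(&FlowCollector{}, &v1beta2.FlowCollector{}, nil)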
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*CertificateReference)(nil), (*v1beta2.CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CertificateReference_To_v1beta2_CertificateReference(a.(*CertificateReference), b.(*v1beta2.CertificateReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.CertificateReference)(nil), (*CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_CertificateReference_To_v1beta1_CertificateReference(a.(*v1beta2.CertificateReference), b.(*CertificateReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClientTLS)(nil), (*v1beta2.ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(a.(*ClientTLS), b.(*v1beta2.ClientTLS), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.ClientTLS)(nil), (*ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ClientTLS_To_v1beta1_ClientTLS(a.(*v1beta2.ClientTLS), b.(*ClientTLS), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClusterNetworkOperatorConfig)(nil), (*v1beta2.ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(a.(*ClusterNetworkOperatorConfig), b.(*v1beta2.ClusterNetworkOperatorConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.ClusterNetworkOperatorConfig)(nil), (*ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(a.(*v1beta2.ClusterNetworkOperatorConfig), b.(*ClusterNetworkOperatorConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ConfigOrSecret)(nil), (*v1beta2.ConfigOrSecret)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ConfigOrSecret_To_v1beta2_ConfigOrSecret(a.(*ConfigOrSecret), b.(*v1beta2.ConfigOrSecret), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.ConfigOrSecret)(nil), (*ConfigOrSecret)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ConfigOrSecret_To_v1beta1_ConfigOrSecret(a.(*v1beta2.ConfigOrSecret), b.(*ConfigOrSecret), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ConsolePluginPortConfig)(nil), (*v1beta2.ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(a.(*ConsolePluginPortConfig), b.(*v1beta2.ConsolePluginPortConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.ConsolePluginPortConfig)(nil), (*ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(a.(*v1beta2.ConsolePluginPortConfig), b.(*ConsolePluginPortConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*DebugConfig)(nil), (*v1beta2.DebugConfig)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(a.(*DebugConfig), b.(*v1beta2.DebugConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.DebugConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(a.(*v1beta2.DebugConfig), b.(*DebugConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollector)(nil), (*v1beta2.FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollector_To_v1beta2_FlowCollector(a.(*FlowCollector), b.(*v1beta2.FlowCollector), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollector)(nil), (*FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollector_To_v1beta1_FlowCollector(a.(*v1beta2.FlowCollector), b.(*FlowCollector), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorAgent)(nil), (*v1beta2.FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(a.(*FlowCollectorAgent), b.(*v1beta2.FlowCollectorAgent), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorAgent)(nil), (*FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(a.(*v1beta2.FlowCollectorAgent), b.(*FlowCollectorAgent), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorConsolePlugin)(nil), (*v1beta2.FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(a.(*FlowCollectorConsolePlugin), b.(*v1beta2.FlowCollectorConsolePlugin), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorConsolePlugin)(nil), (*FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(a.(*v1beta2.FlowCollectorConsolePlugin), b.(*FlowCollectorConsolePlugin), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta2.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta2.FlowCollectorEBPF), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorEBPF)(nil), (*FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(a.(*v1beta2.FlowCollectorEBPF), b.(*FlowCollectorEBPF), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorExporter)(nil), 
(*v1beta2.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta2.FlowCollectorExporter), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorExporter)(nil), (*FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(a.(*v1beta2.FlowCollectorExporter), b.(*FlowCollectorExporter), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorHPA)(nil), (*v1beta2.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta2.FlowCollectorHPA), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorHPA)(nil), (*FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(a.(*v1beta2.FlowCollectorHPA), b.(*FlowCollectorHPA), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIX)(nil), (*v1beta2.FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(a.(*FlowCollectorIPFIX), b.(*v1beta2.FlowCollectorIPFIX), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIX)(nil), (*FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(a.(*v1beta2.FlowCollectorIPFIX), b.(*FlowCollectorIPFIX), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIXReceiver)(nil), (*v1beta2.FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(a.(*FlowCollectorIPFIXReceiver), b.(*v1beta2.FlowCollectorIPFIXReceiver), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIXReceiver)(nil), (*FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(a.(*v1beta2.FlowCollectorIPFIXReceiver), b.(*FlowCollectorIPFIXReceiver), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorKafka)(nil), (*v1beta2.FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(a.(*FlowCollectorKafka), b.(*v1beta2.FlowCollectorKafka), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorKafka)(nil), (*FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(a.(*v1beta2.FlowCollectorKafka), b.(*FlowCollectorKafka), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorList)(nil), (*v1beta2.FlowCollectorList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorList_To_v1beta2_FlowCollectorList(a.(*FlowCollectorList), b.(*v1beta2.FlowCollectorList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorList)(nil), (*FlowCollectorList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorList_To_v1beta1_FlowCollectorList(a.(*v1beta2.FlowCollectorList), b.(*FlowCollectorList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorSpec)(nil), (*v1beta2.FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(a.(*FlowCollectorSpec), b.(*v1beta2.FlowCollectorSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorSpec)(nil), (*FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(a.(*v1beta2.FlowCollectorSpec), b.(*FlowCollectorSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorStatus)(nil), (*v1beta2.FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(a.(*FlowCollectorStatus), b.(*v1beta2.FlowCollectorStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorStatus)(nil), (*FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(a.(*v1beta2.FlowCollectorStatus), b.(*FlowCollectorStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*MetricsServerConfig)(nil), (*v1beta2.MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(a.(*MetricsServerConfig), b.(*v1beta2.MetricsServerConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.MetricsServerConfig)(nil), (*MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_MetricsServerConfig_To_v1beta1_MetricsServerConfig(a.(*v1beta2.MetricsServerConfig), b.(*MetricsServerConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*OVNKubernetesConfig)(nil), (*v1beta2.OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(a.(*OVNKubernetesConfig), b.(*v1beta2.OVNKubernetesConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.OVNKubernetesConfig)(nil), (*OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(a.(*v1beta2.OVNKubernetesConfig), b.(*OVNKubernetesConfig), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*QuickFilter)(nil), (*v1beta2.QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_QuickFilter_To_v1beta2_QuickFilter(a.(*QuickFilter), b.(*v1beta2.QuickFilter), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.QuickFilter)(nil), (*QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_QuickFilter_To_v1beta1_QuickFilter(a.(*v1beta2.QuickFilter), b.(*QuickFilter), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SASLConfig)(nil), (*v1beta2.SASLConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SASLConfig_To_v1beta2_SASLConfig(a.(*SASLConfig), b.(*v1beta2.SASLConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.SASLConfig)(nil), (*SASLConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_SASLConfig_To_v1beta1_SASLConfig(a.(*v1beta2.SASLConfig), b.(*SASLConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServerTLS)(nil), (*v1beta2.ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(a.(*ServerTLS), b.(*v1beta2.ServerTLS), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.ServerTLS)(nil), (*ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(a.(*v1beta2.ServerTLS), b.(*ServerTLS), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*FlowCollectorLoki)(nil), (*v1beta2.FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(a.(*FlowCollectorLoki), b.(*v1beta2.FlowCollectorLoki), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.FLPMetrics)(nil), (*FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(a.(*v1beta2.FLPMetrics), b.(*FLPMetrics), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.FlowCollectorFLP)(nil), (*FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(a.(*v1beta2.FlowCollectorFLP), b.(*FlowCollectorFLP), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.FlowCollectorLoki)(nil), (*FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(a.(*v1beta2.FlowCollectorLoki), b.(*FlowCollectorLoki), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_CertificateReference_To_v1beta2_CertificateReference(in *CertificateReference, out *v1beta2.CertificateReference, s conversion.Scope) error { + out.Type = v1beta2.MountableType(in.Type) + out.Name = in.Name + out.Namespace = in.Namespace + out.CertFile = in.CertFile + out.CertKey = in.CertKey + return nil +} + +// Convert_v1beta1_CertificateReference_To_v1beta2_CertificateReference is an autogenerated conversion function. 
+func Convert_v1beta1_CertificateReference_To_v1beta2_CertificateReference(in *CertificateReference, out *v1beta2.CertificateReference, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateReference_To_v1beta2_CertificateReference(in, out, s) +} + +func autoConvert_v1beta2_CertificateReference_To_v1beta1_CertificateReference(in *v1beta2.CertificateReference, out *CertificateReference, s conversion.Scope) error { + out.Type = MountableType(in.Type) + out.Name = in.Name + out.Namespace = in.Namespace + out.CertFile = in.CertFile + out.CertKey = in.CertKey + return nil +} + +// Convert_v1beta2_CertificateReference_To_v1beta1_CertificateReference is an autogenerated conversion function. +func Convert_v1beta2_CertificateReference_To_v1beta1_CertificateReference(in *v1beta2.CertificateReference, out *CertificateReference, s conversion.Scope) error { + return autoConvert_v1beta2_CertificateReference_To_v1beta1_CertificateReference(in, out, s) +} + +func autoConvert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(in *ClientTLS, out *v1beta2.ClientTLS, s conversion.Scope) error { + out.Enable = in.Enable + out.InsecureSkipVerify = in.InsecureSkipVerify + if err := Convert_v1beta1_CertificateReference_To_v1beta2_CertificateReference(&in.CACert, &out.CACert, s); err != nil { + return err + } + if err := Convert_v1beta1_CertificateReference_To_v1beta2_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_ClientTLS_To_v1beta2_ClientTLS is an autogenerated conversion function. +func Convert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(in *ClientTLS, out *v1beta2.ClientTLS, s conversion.Scope) error { + return autoConvert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(in, out, s) +} + +func autoConvert_v1beta2_ClientTLS_To_v1beta1_ClientTLS(in *v1beta2.ClientTLS, out *ClientTLS, s conversion.Scope) error { + out.Enable = in.Enable + out.InsecureSkipVerify = in.InsecureSkipVerify + if err := Convert_v1beta2_CertificateReference_To_v1beta1_CertificateReference(&in.CACert, &out.CACert, s); err != nil { + return err + } + if err := Convert_v1beta2_CertificateReference_To_v1beta1_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_ClientTLS_To_v1beta1_ClientTLS is an autogenerated conversion function. +func Convert_v1beta2_ClientTLS_To_v1beta1_ClientTLS(in *v1beta2.ClientTLS, out *ClientTLS, s conversion.Scope) error { + return autoConvert_v1beta2_ClientTLS_To_v1beta1_ClientTLS(in, out, s) +} + +func autoConvert_v1beta1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta2.ClusterNetworkOperatorConfig, s conversion.Scope) error { + out.Namespace = in.Namespace + return nil +} + +// Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig is an autogenerated conversion function. 
+func Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta2.ClusterNetworkOperatorConfig, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in, out, s) +} + +func autoConvert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(in *v1beta2.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { + out.Namespace = in.Namespace + return nil +} + +// Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig is an autogenerated conversion function. +func Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(in *v1beta2.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { + return autoConvert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(in, out, s) +} + +func autoConvert_v1beta1_ConfigOrSecret_To_v1beta2_ConfigOrSecret(in *ConfigOrSecret, out *v1beta2.ConfigOrSecret, s conversion.Scope) error { + out.Type = v1beta2.MountableType(in.Type) + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +// Convert_v1beta1_ConfigOrSecret_To_v1beta2_ConfigOrSecret is an autogenerated conversion function. +func Convert_v1beta1_ConfigOrSecret_To_v1beta2_ConfigOrSecret(in *ConfigOrSecret, out *v1beta2.ConfigOrSecret, s conversion.Scope) error { + return autoConvert_v1beta1_ConfigOrSecret_To_v1beta2_ConfigOrSecret(in, out, s) +} + +func autoConvert_v1beta2_ConfigOrSecret_To_v1beta1_ConfigOrSecret(in *v1beta2.ConfigOrSecret, out *ConfigOrSecret, s conversion.Scope) error { + out.Type = MountableType(in.Type) + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +// Convert_v1beta2_ConfigOrSecret_To_v1beta1_ConfigOrSecret is an autogenerated conversion function. +func Convert_v1beta2_ConfigOrSecret_To_v1beta1_ConfigOrSecret(in *v1beta2.ConfigOrSecret, out *ConfigOrSecret, s conversion.Scope) error { + return autoConvert_v1beta2_ConfigOrSecret_To_v1beta1_ConfigOrSecret(in, out, s) +} + +func autoConvert_v1beta1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta2.ConsolePluginPortConfig, s conversion.Scope) error { + out.Enable = (*bool)(unsafe.Pointer(in.Enable)) + out.PortNames = *(*map[string]string)(unsafe.Pointer(&in.PortNames)) + return nil +} + +// Convert_v1beta1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig is an autogenerated conversion function. +func Convert_v1beta1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta2.ConsolePluginPortConfig, s conversion.Scope) error { + return autoConvert_v1beta1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in, out, s) +} + +func autoConvert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(in *v1beta2.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { + out.Enable = (*bool)(unsafe.Pointer(in.Enable)) + out.PortNames = *(*map[string]string)(unsafe.Pointer(&in.PortNames)) + return nil +} + +// Convert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig is an autogenerated conversion function. 
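+// Note that the unsafe.Pointer casts in the port-config conversions above are emitted by
+// conversion-gen when the source and destination fields share an identical memory layout,
+// so values are reinterpreted in place instead of being copied element by element.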
+func Convert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(in *v1beta2.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { + return autoConvert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(in, out, s) +} + +func autoConvert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(in *DebugConfig, out *v1beta2.DebugConfig, s conversion.Scope) error { + out.Env = *(*map[string]string)(unsafe.Pointer(&in.Env)) + return nil +} + +// Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig is an autogenerated conversion function. +func Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(in *DebugConfig, out *v1beta2.DebugConfig, s conversion.Scope) error { + return autoConvert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(in, out, s) +} + +func autoConvert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(in *v1beta2.DebugConfig, out *DebugConfig, s conversion.Scope) error { + out.Env = *(*map[string]string)(unsafe.Pointer(&in.Env)) + return nil +} + +// Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig is an autogenerated conversion function. +func Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(in *v1beta2.DebugConfig, out *DebugConfig, s conversion.Scope) error { + return autoConvert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(in, out, s) +} + +func autoConvert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s conversion.Scope) error { + if err := Convert_v1beta1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { + return err + } + out.IgnoreTags = *(*[]string)(unsafe.Pointer(&in.IgnoreTags)) + out.DisableAlerts = *(*[]v1beta2.FLPAlert)(unsafe.Pointer(&in.DisableAlerts)) + return nil +} + +// Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics is an autogenerated conversion function. +func Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s conversion.Scope) error { + return autoConvert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(in, out, s) +} + +func autoConvert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s conversion.Scope) error { + if err := Convert_v1beta2_MetricsServerConfig_To_v1beta1_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { + return err + } + out.IgnoreTags = *(*[]string)(unsafe.Pointer(&in.IgnoreTags)) + out.DisableAlerts = *(*[]FLPAlert)(unsafe.Pointer(&in.DisableAlerts)) + return nil +} + +func autoConvert_v1beta1_FlowCollector_To_v1beta2_FlowCollector(in *FlowCollector, out *v1beta2.FlowCollector, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_FlowCollector_To_v1beta2_FlowCollector is an autogenerated conversion function. 
+func Convert_v1beta1_FlowCollector_To_v1beta2_FlowCollector(in *FlowCollector, out *v1beta2.FlowCollector, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollector_To_v1beta2_FlowCollector(in, out, s) +} + +func autoConvert_v1beta2_FlowCollector_To_v1beta1_FlowCollector(in *v1beta2.FlowCollector, out *FlowCollector, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta2_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_FlowCollector_To_v1beta1_FlowCollector is an autogenerated conversion function. +func Convert_v1beta2_FlowCollector_To_v1beta1_FlowCollector(in *v1beta2.FlowCollector, out *FlowCollector, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollector_To_v1beta1_FlowCollector(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { + out.Type = in.Type + if err := Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { + return err + } + if err := Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { + out.Type = in.Type + if err := Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { + return err + } + if err := Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error { + out.Enable = (*bool)(unsafe.Pointer(in.Enable)) + out.Register = (*bool)(unsafe.Pointer(in.Register)) + out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) + out.Port = in.Port + out.ImagePullPolicy = in.ImagePullPolicy + out.Resources = in.Resources + out.LogLevel = in.LogLevel + if err := Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { + return err + } + if err := Convert_v1beta1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { + return err + } + out.QuickFilters = *(*[]v1beta2.QuickFilter)(unsafe.Pointer(&in.QuickFilters)) + return nil +} + +// Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s conversion.Scope) error { + out.Enable = (*bool)(unsafe.Pointer(in.Enable)) + out.Register = (*bool)(unsafe.Pointer(in.Register)) + out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) + out.Port = in.Port + out.ImagePullPolicy = in.ImagePullPolicy + out.Resources = in.Resources + out.LogLevel = in.LogLevel + if err := Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { + return err + } + if err := Convert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { + return err + } + out.QuickFilters = *(*[]QuickFilter)(unsafe.Pointer(&in.QuickFilters)) + return nil +} + +// Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { + out.ImagePullPolicy = in.ImagePullPolicy + out.Resources = in.Resources + out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) + out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) + out.LogLevel = in.LogLevel + out.Privileged = in.Privileged + out.KafkaBatchSize = in.KafkaBatchSize + if err := Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + return err + } + out.EnablePktDrop = (*bool)(unsafe.Pointer(in.EnablePktDrop)) + out.EnableDNSTracking = (*bool)(unsafe.Pointer(in.EnableDNSTracking)) + return nil +} + +// Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { + out.ImagePullPolicy = in.ImagePullPolicy + out.Resources = in.Resources + out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) + out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) + out.LogLevel = in.LogLevel + out.Privileged = in.Privileged + out.KafkaBatchSize = in.KafkaBatchSize + if err := Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + return err + } + out.EnablePktDrop = (*bool)(unsafe.Pointer(in.EnablePktDrop)) + out.EnableDNSTracking = (*bool)(unsafe.Pointer(in.EnableDNSTracking)) + return nil +} + +// Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { + out.Type = v1beta2.ExporterType(in.Type) + if err := Convert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { + return err + } + if err := Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter is an autogenerated conversion function. 
+func Convert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s conversion.Scope) error { + out.Type = ExporterType(in.Type) + if err := Convert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { + return err + } + if err := Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error { + out.Port = in.Port + out.HealthPort = in.HealthPort + out.ProfilePort = in.ProfilePort + out.ImagePullPolicy = in.ImagePullPolicy + if err := Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { + return err + } + out.LogLevel = in.LogLevel + out.Resources = in.Resources + out.EnableKubeProbes = (*bool)(unsafe.Pointer(in.EnableKubeProbes)) + out.DropUnusedFields = (*bool)(unsafe.Pointer(in.DropUnusedFields)) + out.KafkaConsumerReplicas = (*int32)(unsafe.Pointer(in.KafkaConsumerReplicas)) + if err := Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { + return err + } + out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity + out.KafkaConsumerBatchSize = in.KafkaConsumerBatchSize + out.LogTypes = (*string)(unsafe.Pointer(in.LogTypes)) + out.ConversationHeartbeatInterval = (*v1.Duration)(unsafe.Pointer(in.ConversationHeartbeatInterval)) + out.ConversationEndTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationEndTimeout)) + out.ConversationTerminatingTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationTerminatingTimeout)) + if err := Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP is an autogenerated conversion function. 
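+// Only this direction gets a generated public wrapper: because v1beta2 adds processor fields
+// that v1beta1 lacks, the reverse Convert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP
+// is hand-written in flowcollector_webhook.go and registered above via AddConversionFunc.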
+func Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s conversion.Scope) error { + out.Port = in.Port + out.HealthPort = in.HealthPort + out.ProfilePort = in.ProfilePort + out.ImagePullPolicy = in.ImagePullPolicy + if err := Convert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { + return err + } + out.LogLevel = in.LogLevel + out.Resources = in.Resources + out.EnableKubeProbes = (*bool)(unsafe.Pointer(in.EnableKubeProbes)) + out.DropUnusedFields = (*bool)(unsafe.Pointer(in.DropUnusedFields)) + out.KafkaConsumerReplicas = (*int32)(unsafe.Pointer(in.KafkaConsumerReplicas)) + if err := Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { + return err + } + out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity + out.KafkaConsumerBatchSize = in.KafkaConsumerBatchSize + out.LogTypes = (*string)(unsafe.Pointer(in.LogTypes)) + out.ConversationHeartbeatInterval = (*v1.Duration)(unsafe.Pointer(in.ConversationHeartbeatInterval)) + out.ConversationEndTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationEndTimeout)) + out.ConversationTerminatingTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationTerminatingTimeout)) + if err := Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s conversion.Scope) error { + out.Status = in.Status + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + out.Metrics = *(*[]v2.MetricSpec)(unsafe.Pointer(&in.Metrics)) + return nil +} + +// Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { + out.Status = in.Status + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + out.Metrics = *(*[]v2.MetricSpec)(unsafe.Pointer(&in.Metrics)) + return nil +} + +// Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Sampling = in.Sampling + out.ForceSampleAll = in.ForceSampleAll + if err := Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { + return err + } + if err := Convert_v1beta1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Sampling = in.Sampling + out.ForceSampleAll = in.ForceSampleAll + if err := Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { + return err + } + if err := Convert_v1beta2_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { + out.TargetHost = in.TargetHost + out.TargetPort = in.TargetPort + out.Transport = in.Transport + return nil +} + +// Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { + out.TargetHost = in.TargetHost + out.TargetPort = in.TargetPort + out.Transport = in.Transport + return nil +} + +// Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { + out.Address = in.Address + out.Topic = in.Topic + if err := Convert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(&in.TLS, &out.TLS, s); err != nil { + return err + } + // INFO: in.SASL opted out of conversion generation + return nil +} + +// Convert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(in *v1beta2.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { + out.Address = in.Address + out.Topic = in.Topic + if err := Convert_v1beta2_ClientTLS_To_v1beta1_ClientTLS(&in.TLS, &out.TLS, s); err != nil { + return err + } + // INFO: in.SASL opted out of conversion generation + return nil +} + +// Convert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(in *v1beta2.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorList_To_v1beta2_FlowCollectorList(in *FlowCollectorList, out *v1beta2.FlowCollectorList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1beta2.FlowCollector, len(*in)) + for i := range *in { + if err := Convert_v1beta1_FlowCollector_To_v1beta2_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1beta1_FlowCollectorList_To_v1beta2_FlowCollectorList is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorList_To_v1beta2_FlowCollectorList(in *FlowCollectorList, out *v1beta2.FlowCollectorList, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorList_To_v1beta2_FlowCollectorList(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorList_To_v1beta1_FlowCollectorList(in *v1beta2.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlowCollector, len(*in)) + for i := range *in { + if err := Convert_v1beta2_FlowCollector_To_v1beta1_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1beta2_FlowCollectorList_To_v1beta1_FlowCollectorList is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorList_To_v1beta1_FlowCollectorList(in *v1beta2.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorList_To_v1beta1_FlowCollectorList(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta2.FlowCollectorLoki, s conversion.Scope) error { + out.Enable = (*bool)(unsafe.Pointer(in.Enable)) + // WARNING: in.URL requires manual conversion: does not exist in peer-type + // WARNING: in.QuerierURL requires manual conversion: does not exist in peer-type + // WARNING: in.StatusURL requires manual conversion: does not exist in peer-type + // WARNING: in.TenantID requires manual conversion: does not exist in peer-type + // WARNING: in.AuthToken requires manual conversion: does not exist in peer-type + out.BatchWait = (*v1.Duration)(unsafe.Pointer(in.BatchWait)) + out.BatchSize = in.BatchSize + out.Timeout = (*v1.Duration)(unsafe.Pointer(in.Timeout)) + out.MinBackoff = (*v1.Duration)(unsafe.Pointer(in.MinBackoff)) + out.MaxBackoff = (*v1.Duration)(unsafe.Pointer(in.MaxBackoff)) + out.MaxRetries = (*int32)(unsafe.Pointer(in.MaxRetries)) + out.StaticLabels = *(*map[string]string)(unsafe.Pointer(&in.StaticLabels)) + // WARNING: in.TLS requires manual conversion: does not exist in peer-type + // WARNING: in.StatusTLS requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1beta2_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in *v1beta2.FlowCollectorLoki, out *FlowCollectorLoki, s conversion.Scope) error { + // WARNING: in.Mode requires manual conversion: does not exist in peer-type + // WARNING: in.Manual requires manual conversion: does not exist in peer-type + // WARNING: in.LokiStack requires manual conversion: does not exist in peer-type + out.Enable = (*bool)(unsafe.Pointer(in.Enable)) + out.BatchWait = (*v1.Duration)(unsafe.Pointer(in.BatchWait)) + out.BatchSize = in.BatchSize + out.Timeout = (*v1.Duration)(unsafe.Pointer(in.Timeout)) + out.MinBackoff = (*v1.Duration)(unsafe.Pointer(in.MinBackoff)) + out.MaxBackoff = (*v1.Duration)(unsafe.Pointer(in.MaxBackoff)) + out.MaxRetries = (*int32)(unsafe.Pointer(in.MaxRetries)) + out.StaticLabels = *(*map[string]string)(unsafe.Pointer(&in.StaticLabels)) + return nil +} + +func autoConvert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s conversion.Scope) error { + out.Namespace = in.Namespace + if err := Convert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { + return err + } + if err := Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { + return err + } + // INFO: in.Loki opted out of conversion generation + if err := Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { + return err + } + out.DeploymentModel = in.DeploymentModel + if err := Convert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { + return err + } + // INFO: in.Exporters opted out of conversion generation + return nil +} + +// Convert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec is an autogenerated conversion function. 
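+// Note that spec.Loki is opted out of generation on both sides (see the WARNING comments
+// above): v1beta2 groups the v1beta1 connection settings under Manual and adds the Mode and
+// LokiStack fields, so that mapping is handled by the hand-written Loki converters in
+// flowcollector_webhook.go, together with the annotation-based restore of Spec.Loki.Manual
+// on up-conversion.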
+func Convert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { + out.Namespace = in.Namespace + if err := Convert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { + return err + } + if err := Convert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { + return err + } + // INFO: in.Loki opted out of conversion generation + if err := Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { + return err + } + out.DeploymentModel = in.DeploymentModel + if err := Convert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { + return err + } + // INFO: in.Exporters opted out of conversion generation + return nil +} + +// Convert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta2.FlowCollectorStatus, s conversion.Scope) error { + out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) + out.Namespace = in.Namespace + return nil +} + +// Convert_v1beta1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta2.FlowCollectorStatus, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(in *v1beta2.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { + out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) + out.Namespace = in.Namespace + return nil +} + +// Convert_v1beta2_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(in *v1beta2.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(in, out, s) +} + +func autoConvert_v1beta1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in *MetricsServerConfig, out *v1beta2.MetricsServerConfig, s conversion.Scope) error { + out.Port = in.Port + if err := Convert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(&in.TLS, &out.TLS, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_MetricsServerConfig_To_v1beta2_MetricsServerConfig is an autogenerated conversion function. 
+func Convert_v1beta1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in *MetricsServerConfig, out *v1beta2.MetricsServerConfig, s conversion.Scope) error { + return autoConvert_v1beta1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in, out, s) +} + +func autoConvert_v1beta2_MetricsServerConfig_To_v1beta1_MetricsServerConfig(in *v1beta2.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { + out.Port = in.Port + if err := Convert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(&in.TLS, &out.TLS, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_MetricsServerConfig_To_v1beta1_MetricsServerConfig is an autogenerated conversion function. +func Convert_v1beta2_MetricsServerConfig_To_v1beta1_MetricsServerConfig(in *v1beta2.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { + return autoConvert_v1beta2_MetricsServerConfig_To_v1beta1_MetricsServerConfig(in, out, s) +} + +func autoConvert_v1beta1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta2.OVNKubernetesConfig, s conversion.Scope) error { + out.Namespace = in.Namespace + out.DaemonSetName = in.DaemonSetName + out.ContainerName = in.ContainerName + return nil +} + +// Convert_v1beta1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig is an autogenerated conversion function. +func Convert_v1beta1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta2.OVNKubernetesConfig, s conversion.Scope) error { + return autoConvert_v1beta1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in, out, s) +} + +func autoConvert_v1beta2_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(in *v1beta2.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { + out.Namespace = in.Namespace + out.DaemonSetName = in.DaemonSetName + out.ContainerName = in.ContainerName + return nil +} + +// Convert_v1beta2_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig is an autogenerated conversion function. +func Convert_v1beta2_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(in *v1beta2.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { + return autoConvert_v1beta2_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(in, out, s) +} + +func autoConvert_v1beta1_QuickFilter_To_v1beta2_QuickFilter(in *QuickFilter, out *v1beta2.QuickFilter, s conversion.Scope) error { + out.Name = in.Name + out.Filter = *(*map[string]string)(unsafe.Pointer(&in.Filter)) + out.Default = in.Default + return nil +} + +// Convert_v1beta1_QuickFilter_To_v1beta2_QuickFilter is an autogenerated conversion function. +func Convert_v1beta1_QuickFilter_To_v1beta2_QuickFilter(in *QuickFilter, out *v1beta2.QuickFilter, s conversion.Scope) error { + return autoConvert_v1beta1_QuickFilter_To_v1beta2_QuickFilter(in, out, s) +} + +func autoConvert_v1beta2_QuickFilter_To_v1beta1_QuickFilter(in *v1beta2.QuickFilter, out *QuickFilter, s conversion.Scope) error { + out.Name = in.Name + out.Filter = *(*map[string]string)(unsafe.Pointer(&in.Filter)) + out.Default = in.Default + return nil +} + +// Convert_v1beta2_QuickFilter_To_v1beta1_QuickFilter is an autogenerated conversion function. 
+func Convert_v1beta2_QuickFilter_To_v1beta1_QuickFilter(in *v1beta2.QuickFilter, out *QuickFilter, s conversion.Scope) error { + return autoConvert_v1beta2_QuickFilter_To_v1beta1_QuickFilter(in, out, s) +} + +func autoConvert_v1beta1_SASLConfig_To_v1beta2_SASLConfig(in *SASLConfig, out *v1beta2.SASLConfig, s conversion.Scope) error { + out.Type = v1beta2.SASLType(in.Type) + if err := Convert_v1beta1_ConfigOrSecret_To_v1beta2_ConfigOrSecret(&in.Reference, &out.Reference, s); err != nil { + return err + } + out.ClientIDKey = in.ClientIDKey + out.ClientSecretKey = in.ClientSecretKey + return nil +} + +// Convert_v1beta1_SASLConfig_To_v1beta2_SASLConfig is an autogenerated conversion function. +func Convert_v1beta1_SASLConfig_To_v1beta2_SASLConfig(in *SASLConfig, out *v1beta2.SASLConfig, s conversion.Scope) error { + return autoConvert_v1beta1_SASLConfig_To_v1beta2_SASLConfig(in, out, s) +} + +func autoConvert_v1beta2_SASLConfig_To_v1beta1_SASLConfig(in *v1beta2.SASLConfig, out *SASLConfig, s conversion.Scope) error { + out.Type = SASLType(in.Type) + if err := Convert_v1beta2_ConfigOrSecret_To_v1beta1_ConfigOrSecret(&in.Reference, &out.Reference, s); err != nil { + return err + } + out.ClientIDKey = in.ClientIDKey + out.ClientSecretKey = in.ClientSecretKey + return nil +} + +// Convert_v1beta2_SASLConfig_To_v1beta1_SASLConfig is an autogenerated conversion function. +func Convert_v1beta2_SASLConfig_To_v1beta1_SASLConfig(in *v1beta2.SASLConfig, out *SASLConfig, s conversion.Scope) error { + return autoConvert_v1beta2_SASLConfig_To_v1beta1_SASLConfig(in, out, s) +} + +func autoConvert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s conversion.Scope) error { + out.Type = v1beta2.ServerTLSConfigType(in.Type) + out.Provided = (*v1beta2.CertificateReference)(unsafe.Pointer(in.Provided)) + return nil +} + +// Convert_v1beta1_ServerTLS_To_v1beta2_ServerTLS is an autogenerated conversion function. +func Convert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s conversion.Scope) error { + return autoConvert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(in, out, s) +} + +func autoConvert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s conversion.Scope) error { + out.Type = ServerTLSConfigType(in.Type) + out.Provided = (*CertificateReference)(unsafe.Pointer(in.Provided)) + return nil +} + +// Convert_v1beta2_ServerTLS_To_v1beta1_ServerTLS is an autogenerated conversion function. +func Convert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s conversion.Scope) error { + return autoConvert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(in, out, s) +} diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 7caa6955c..09748c294 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ package v1beta1 import ( "k8s.io/api/autoscaling/v2" "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/api/v1beta2/doc.go b/api/v1beta2/doc.go new file mode 100644 index 000000000..cfb9ccd8d --- /dev/null +++ b/api/v1beta2/doc.go @@ -0,0 +1,15 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1beta2 contains the v1beta2 API implementation.
+package v1beta2
diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go
new file mode 100644
index 000000000..c8c69caf2
--- /dev/null
+++ b/api/v1beta2/flowcollector_types.go
@@ -0,0 +1,867 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package v1beta2
+
+import (
+	ascv2 "k8s.io/api/autoscaling/v2"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+const (
+	AgentIPFIX            = "IPFIX"
+	AgentEBPF             = "EBPF"
+	DeploymentModelDirect = "DIRECT"
+	DeploymentModelKafka  = "KAFKA"
+)
+
+// Note that the FlowCollectorSpec's properties MUST redefine one of the default
+// values to force the definition of the section when it is not provided by the manifest.
+// This ensures that the remaining default fields are set according to their definition.
+// Otherwise, omitting the sections in the manifest would lead to zero-valued properties.
+// This is a workaround for the related issue:
+// https://github.com/kubernetes-sigs/controller-tools/issues/622
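+
+// For illustration only (mirroring the `namespace` default declared below), such
+// a redefined default on a section property looks like:
+//
+//	//+kubebuilder:default:=netobserv
+//	Namespace string `json:"namespace,omitempty"`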
+
+// Defines the desired state of the FlowCollector resource.
+//
+// *: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature
+// is not officially supported by Red Hat. It might have been, for instance, contributed by the community
+// and accepted without a formal agreement for maintenance. The product maintainers might provide some support
+// for these features as a best effort only.
+type FlowCollectorSpec struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	// Namespace where NetObserv pods are deployed.
+	// If empty, the namespace of the operator is used.
+	// +kubebuilder:default:=netobserv
+	Namespace string `json:"namespace,omitempty"`
+
+	// Agent configuration for flows extraction.
+	Agent FlowCollectorAgent `json:"agent,omitempty"`
+
+	// `processor` defines the settings of the component that receives the flows from the agent,
+	// enriches them, generates metrics, and forwards them to the Loki persistence layer and/or any available exporter.
+	Processor FlowCollectorFLP `json:"processor,omitempty"`
+
+	// `loki`, the flow store, client settings.
+	// +k8s:conversion-gen=false
+	Loki FlowCollectorLoki `json:"loki,omitempty"`
+
+	// `consolePlugin` defines the settings related to the OpenShift Console plugin, when available.
+	ConsolePlugin FlowCollectorConsolePlugin `json:"consolePlugin,omitempty"`
+
+	// `deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
+	// - `DIRECT` (default) to make the flow processor listen directly from the agents.
+	// - `KAFKA` to send flows to a Kafka pipeline before consumption by the processor.
+	// Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:="DIRECT";"KAFKA"
+	// +kubebuilder:default:=DIRECT
+	DeploymentModel string `json:"deploymentModel,omitempty"`
+
+	// Kafka configuration, allowing the use of Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `KAFKA`.
+	// +optional
+	Kafka FlowCollectorKafka `json:"kafka,omitempty"`
+
+	// `exporters` define additional optional exporters for custom consumption or storage.
+	// +optional
+	// +k8s:conversion-gen=false
+	Exporters []*FlowCollectorExporter `json:"exporters"`
+}
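+
+// A minimal sketch, for illustration only (hypothetical Kafka address and topic):
+// a spec switching to the Kafka deployment model could look like:
+//
+//	spec := FlowCollectorSpec{
+//		Namespace:       "netobserv",
+//		DeploymentModel: DeploymentModelKafka,
+//		Kafka: FlowCollectorKafka{
+//			Address: "kafka-cluster-kafka-bootstrap.netobserv",
+//			Topic:   "network-flows",
+//		},
+//	}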
+
+// `FlowCollectorAgent` is a discriminated union that allows selecting either ipfix or ebpf, but does not
+// allow defining both fields.
+// +union
+type FlowCollectorAgent struct {
+	// `type` selects the flows tracing agent. Possible values are:
+	// - `EBPF` (default) to use the NetObserv eBPF agent.
+	// - `IPFIX` - deprecated (*) - to use the legacy IPFIX collector.
+	// `EBPF` is recommended as it offers better performance and should work regardless of the CNI installed on the cluster.
+	// `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX,
+	// but they would require manual configuration).
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:="EBPF";"IPFIX"
+	// +kubebuilder:default:=EBPF
+	Type string `json:"type,omitempty"`
+
+	// `ipfix` - deprecated (*) - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type`
+	// is set to `IPFIX`.
+	// +optional
+	IPFIX FlowCollectorIPFIX `json:"ipfix,omitempty"`
+
+	// `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type`
+	// is set to `EBPF`.
+	// +optional
+	EBPF FlowCollectorEBPF `json:"ebpf,omitempty"`
+}
+
+// `FlowCollectorIPFIX` defines a FlowCollector that uses IPFIX on OVN-Kubernetes to collect the
+// flows information
+type FlowCollectorIPFIX struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	//+kubebuilder:validation:Pattern:=^\d+(ns|ms|s|m)?$
+	//+kubebuilder:default:="20s"
+	// `cacheActiveTimeout` is the max period during which the reporter will aggregate flows before sending.
+	CacheActiveTimeout string `json:"cacheActiveTimeout,omitempty" mapstructure:"cacheActiveTimeout,omitempty"`
+
+	//+kubebuilder:validation:Minimum=0
+	//+kubebuilder:default:=400
+	// `cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows.
+	CacheMaxFlows int32 `json:"cacheMaxFlows,omitempty" mapstructure:"cacheMaxFlows,omitempty"`
+
+	//+kubebuilder:validation:Minimum=2
+	//+kubebuilder:default:=400
+	// `sampling` is the sampling rate on the reporter. 100 means one flow in 100 is sent.
+	// To ensure cluster stability, it is not possible to set a value below 2.
+	// If you really want to sample every packet, which might impact the cluster stability,
+	// refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of IPFIX.
+	Sampling int32 `json:"sampling,omitempty" mapstructure:"sampling,omitempty"`
+
+	//+kubebuilder:default:=false
+	// `forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter.
+	// It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability.
+	// If you REALLY want to do that, set this flag to true. Use at your own risk.
+	// When it is set to true, the value of `sampling` is ignored.
+	ForceSampleAll bool `json:"forceSampleAll,omitempty" mapstructure:"-"`
+
+	// `clusterNetworkOperator` defines the settings related to the OpenShift Cluster Network Operator, when available.
+	ClusterNetworkOperator ClusterNetworkOperatorConfig `json:"clusterNetworkOperator,omitempty" mapstructure:"-"`
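+
+	// A minimal sketch, for illustration only: an IPFIX section lowering the
+	// sampling ratio to 1:100, within the constraints described above:
+	//
+	//	ipfix := FlowCollectorIPFIX{
+	//		CacheActiveTimeout: "20s",
+	//		CacheMaxFlows:      400,
+	//		Sampling:           100,
+	//	}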
+
+	// `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.
+	OVNKubernetes OVNKubernetesConfig `json:"ovnKubernetes,omitempty" mapstructure:"-"`
+}
+
+// `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information
+type FlowCollectorEBPF struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	//+kubebuilder:validation:Enum=IfNotPresent;Always;Never
+	//+kubebuilder:default:=IfNotPresent
+	// `imagePullPolicy` is the Kubernetes pull policy for the image defined above
+	ImagePullPolicy string `json:"imagePullPolicy,omitempty"`
+
+	//+kubebuilder:default:={requests:{memory:"50Mi",cpu:"100m"},limits:{memory:"800Mi"}}
+	// `resources` are the compute resources required by this container.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	// +optional
+	Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
+
+	// Sampling rate of the flow reporter. 100 means one flow in 100 is sent. 0 or 1 means all flows are sampled.
+	//+kubebuilder:validation:Minimum=0
+	//+kubebuilder:default:=50
+	//+optional
+	Sampling *int32 `json:"sampling,omitempty"`
+
+	// `cacheActiveTimeout` is the max period during which the reporter will aggregate flows before sending.
+	// Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load,
+	// however you can expect higher memory consumption and an increased latency in the flow collection.
+	//+kubebuilder:validation:Pattern:=^\d+(ns|ms|s|m)?$
+	//+kubebuilder:default:="5s"
+	CacheActiveTimeout string `json:"cacheActiveTimeout,omitempty"`
+
+	// `cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows.
+	// Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load,
+	// however you can expect higher memory consumption and an increased latency in the flow collection.
+	//+kubebuilder:validation:Minimum=1
+	//+kubebuilder:default:=100000
+	CacheMaxFlows int32 `json:"cacheMaxFlows,omitempty"`
+
+	// `interfaces` contains the interface names from where flows are collected. If empty, the agent
+	// fetches all the interfaces in the system, except the ones listed in `excludeInterfaces`.
+	// An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression.
+	// Otherwise it is matched as a case-sensitive string.
+	//+optional
+	Interfaces []string `json:"interfaces"`
+
+	// `excludeInterfaces` contains the interface names that are excluded from flow tracing.
+	// An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression.
+	// Otherwise it is matched as a case-sensitive string.
+	//+kubebuilder:default=lo;
+	//+optional
+	ExcludeInterfaces []string `json:"excludeInterfaces"`
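+
+	// A minimal sketch, for illustration only (hypothetical interface names),
+	// mixing a literal entry with a regular expression entry:
+	//
+	//	ebpf := FlowCollectorEBPF{
+	//		Interfaces:        []string{"eth0", "/br-/"},
+	//		ExcludeInterfaces: []string{"lo"},
+	//	}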
+
+	//+kubebuilder:validation:Enum=trace;debug;info;warn;error;fatal;panic
+	//+kubebuilder:default:=info
+	// `logLevel` defines the log level for the NetObserv eBPF Agent
+	LogLevel string `json:"logLevel,omitempty"`
+
+	// Privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false:
+	// in that case, the operator sets granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE)
+	// on the container, to enable its correct operation.
+	// If for some reason these capabilities cannot be set, such as when an old kernel version that does not know CAP_BPF
+	// is in use, then you can turn on this mode for more global privileges.
+	// +optional
+	Privileged bool `json:"privileged,omitempty"`
+
+	//+kubebuilder:default:=10485760
+	// +optional
+	// `kafkaBatchSize` limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB.
+	KafkaBatchSize int `json:"kafkaBatchSize"`
+
+	// `debug` allows setting some aspects of the internal configuration of the eBPF agent.
+	// This section is aimed exclusively at debugging and fine-grained performance optimizations,
+	// such as GOGC and GOMAXPROCS env vars. Users setting its values do it at their own risk.
+	// +optional
+	Debug DebugConfig `json:"debug,omitempty"`
+
+	// Enable the packet drops logging feature. This feature requires mounting
+	// the kernel debug filesystem, so the eBPF pod has to run as privileged.
+	// If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
+	//+kubebuilder:default:=false
+	//+optional
+	EnablePktDrop *bool `json:"enablePktDrop,omitempty"`
+
+	// Enable the DNS tracking feature. This feature requires mounting
+	// the kernel debug filesystem, hence the eBPF pod has to run as privileged.
+	// If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
+	//+kubebuilder:default:=false
+	//+optional
+	EnableDNSTracking *bool `json:"enableDNSTracking,omitempty"`
+}
+
+// `FlowCollectorKafka` defines the desired Kafka config of FlowCollector
+type FlowCollectorKafka struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	//+kubebuilder:default:=""
+	// Address of the Kafka server
+	Address string `json:"address"`
+
+	//+kubebuilder:default:=""
+	// Kafka topic to use. It must exist; NetObserv does not create it.
+	Topic string `json:"topic"`
+
+	// TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093.
+	// +optional
+	TLS ClientTLS `json:"tls"`
+
+	// SASL authentication configuration. Unsupported (*)
+	// +optional
+	// +k8s:conversion-gen=false
+	SASL SASLConfig `json:"sasl"`
+}
+
+type FlowCollectorIPFIXReceiver struct {
+	//+kubebuilder:default:=""
+	// Address of the IPFIX external receiver
+	TargetHost string `json:"targetHost"`
+
+	// Port for the IPFIX external receiver
+	TargetPort int `json:"targetPort"`
+
+	// Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`.
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:="TCP";"UDP"
+	// +optional
+	Transport string `json:"transport,omitempty"`
+}
+
+const (
+	ServerTLSDisabled = "DISABLED"
+	ServerTLSProvided = "PROVIDED"
+	ServerTLSAuto     = "AUTO"
+)
+
+type ServerTLSConfigType string
+
+// `ServerTLS` defines the TLS configuration, server side
+type ServerTLS struct {
+	// Select the type of TLS configuration:
+	// - `DISABLED` (default) to not configure TLS for the endpoint.
+	// - `PROVIDED` to manually provide a cert file and a key file.
+	// - `AUTO` to use the OpenShift auto-generated certificate using annotations.
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:="DISABLED";"PROVIDED";"AUTO"
+	// +kubebuilder:validation:Required
+	//+kubebuilder:default:="DISABLED"
+	Type ServerTLSConfigType `json:"type,omitempty"`
+
+	// TLS configuration when `type` is set to `PROVIDED`.
+	// +optional
+	Provided *CertificateReference `json:"provided"`
+}
+
+// `MetricsServerConfig` defines the metrics server endpoint configuration for the Prometheus scraper
+type MetricsServerConfig struct {
+
+	//+kubebuilder:validation:Minimum=1
+	//+kubebuilder:validation:Maximum=65535
+	//+kubebuilder:default:=9102
+	// The Prometheus HTTP port
+	Port int32 `json:"port,omitempty"`
+
+	// TLS configuration.
+	// +optional
+	TLS ServerTLS `json:"tls"`
+}
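+
+// A minimal sketch, for illustration only: a metrics endpoint secured with the
+// OpenShift auto-generated certificate:
+//
+//	server := MetricsServerConfig{
+//		Port: 9102,
+//		TLS:  ServerTLS{Type: ServerTLSAuto},
+//	}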
+
+const (
+	AlertNoFlows   = "NetObservNoFlows"
+	AlertLokiError = "NetObservLokiError"
+)
+
+// Name of a processor alert.
+// Possible values are:
+// - `NetObservNoFlows`, which is triggered when no flows are being observed for a certain period.
+// - `NetObservLokiError`, which is triggered when flows are being dropped due to Loki errors.
+// +kubebuilder:validation:Enum:="NetObservNoFlows";"NetObservLokiError"
+type FLPAlert string
+
+// `FLPMetrics` defines the desired FLP configuration regarding metrics
+type FLPMetrics struct {
+	// Metrics server endpoint configuration for Prometheus scraper
+	// +optional
+	Server MetricsServerConfig `json:"server,omitempty"`
+
+	// `ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions .
+	// Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`.
+	//+kubebuilder:default:={"egress","packets"}
+	// +optional
+	IgnoreTags []string `json:"ignoreTags"`
+
+	// `disableAlerts` is a list of alerts that should be disabled.
+	// Possible values are:
+	// - `NetObservNoFlows`, which is triggered when no flows are being observed for a certain period.
+	// - `NetObservLokiError`, which is triggered when flows are being dropped due to Loki errors.
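+	//
+	// A minimal sketch, for illustration only: disabling the Loki alert while
+	// keeping the no-flows alert enabled:
+	//
+	//	metrics := FLPMetrics{DisableAlerts: []FLPAlert{AlertLokiError}}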
+	// +optional
+	DisableAlerts []FLPAlert `json:"disableAlerts"`
+}
+
+const (
+	LogTypeFlows              = "FLOWS"
+	LogTypeConversations      = "CONVERSATIONS"
+	LogTypeEndedConversations = "ENDED_CONVERSATIONS"
+	LogTypeAll                = "ALL"
+)
+
+// `FlowCollectorFLP` defines the desired flowlogs-pipeline state of FlowCollector
+type FlowCollectorFLP struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	//+kubebuilder:validation:Minimum=1025
+	//+kubebuilder:validation:Maximum=65535
+	//+kubebuilder:default:=2055
+	// Port of the flow collector (host port).
+	// By convention, some values are forbidden. It must be greater than 1024 and different from
+	// 4500, 4789 and 6081.
+	Port int32 `json:"port,omitempty"`
+
+	//+kubebuilder:validation:Minimum=1
+	//+kubebuilder:validation:Maximum=65535
+	//+kubebuilder:default:=8080
+	// `healthPort` is a collector HTTP port in the Pod that exposes the health check API
+	HealthPort int32 `json:"healthPort,omitempty"`
+
+	//+kubebuilder:validation:Minimum=0
+	//+kubebuilder:validation:Maximum=65535
+	//+optional
+	// `profilePort` allows setting up a Go pprof profiler listening on this port
+	ProfilePort int32 `json:"profilePort,omitempty"`
+
+	//+kubebuilder:validation:Enum=IfNotPresent;Always;Never
+	//+kubebuilder:default:=IfNotPresent
+	// `imagePullPolicy` is the Kubernetes pull policy for the image defined above
+	ImagePullPolicy string `json:"imagePullPolicy,omitempty"`
+
+	// `metrics` defines the processor configuration regarding metrics
+	Metrics FLPMetrics `json:"metrics,omitempty"`
+
+	//+kubebuilder:validation:Enum=trace;debug;info;warn;error;fatal;panic
+	//+kubebuilder:default:=info
+	// `logLevel` of the processor runtime
+	LogLevel string `json:"logLevel,omitempty"`
+
+	//+kubebuilder:default:={requests:{memory:"100Mi",cpu:"100m"},limits:{memory:"800Mi"}}
+	// `resources` are the compute resources required by this container.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	// +optional
+	Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
+
+	//+kubebuilder:default:=true
+	// `enableKubeProbes` is a flag to enable or disable Kubernetes liveness and readiness probes
+	EnableKubeProbes *bool `json:"enableKubeProbes,omitempty"`
+
+	//+kubebuilder:default:=true
+	// `dropUnusedFields`, when set to true, drops fields that are known to be unused by OVS, to save storage space.
+	DropUnusedFields *bool `json:"dropUnusedFields,omitempty"`
+
+	//+kubebuilder:validation:Minimum=0
+	//+kubebuilder:default:=3
+	// `kafkaConsumerReplicas` defines the number of replicas (pods) to start for `flowlogs-pipeline-transformer`, which consumes Kafka messages.
+	// This setting is ignored when Kafka is disabled.
+	KafkaConsumerReplicas *int32 `json:"kafkaConsumerReplicas,omitempty"`
+
+	// `kafkaConsumerAutoscaler` is the spec of a horizontal pod autoscaler to set up for `flowlogs-pipeline-transformer`, which consumes Kafka messages.
+	// This setting is ignored when Kafka is disabled.
+	// +optional
+	KafkaConsumerAutoscaler FlowCollectorHPA `json:"kafkaConsumerAutoscaler,omitempty"`
+
+	//+kubebuilder:default:=1000
+	// +optional
+	// `kafkaConsumerQueueCapacity` defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka.
+	KafkaConsumerQueueCapacity int `json:"kafkaConsumerQueueCapacity"`
+
+	//+kubebuilder:default:=10485760
+	// +optional
+	// `kafkaConsumerBatchSize` indicates to the broker the maximum batch size, in bytes, that the consumer will accept. Ignored when not using Kafka. Default: 10MB.
+	KafkaConsumerBatchSize int `json:"kafkaConsumerBatchSize"`
+
+	// `logTypes` defines the desired record types to generate. Possible values are:
+	// - `FLOWS` (default) to export regular network flows
+	// - `CONVERSATIONS` to generate events for started conversations, ended conversations as well as periodic "tick" updates
+	// - `ENDED_CONVERSATIONS` to generate only ended conversations events
+	// - `ALL` to generate both network flows and all conversations events
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:validation:Enum:="FLOWS";"CONVERSATIONS";"ENDED_CONVERSATIONS";"ALL"
+	// +kubebuilder:default:=FLOWS
+	LogTypes *string `json:"logTypes,omitempty"`
+
+	//+kubebuilder:default:="30s"
+	// +optional
+	// `conversationHeartbeatInterval` is the time to wait between "tick" events of a conversation
+	ConversationHeartbeatInterval *metav1.Duration `json:"conversationHeartbeatInterval,omitempty"`
+
+	//+kubebuilder:default:="10s"
+	// +optional
+	// `conversationEndTimeout` is the time to wait after a network flow is received, to consider the conversation ended.
+	// This delay is ignored when a FIN packet is collected for TCP flows (see `conversationTerminatingTimeout` instead).
+	ConversationEndTimeout *metav1.Duration `json:"conversationEndTimeout,omitempty"`
+
+	//+kubebuilder:default:="5s"
+	// +optional
+	// `conversationTerminatingTimeout` is the time to wait from detected FIN flag to end a conversation. Only relevant for TCP flows.
+	ConversationTerminatingTimeout *metav1.Duration `json:"conversationTerminatingTimeout,omitempty"`
+
+	// `debug` allows setting some aspects of the internal configuration of the flow processor.
+	// This section is aimed exclusively at debugging and fine-grained performance optimizations,
+	// such as GOGC and GOMAXPROCS env vars. Users setting its values do it at their own risk.
+	// +optional
+	Debug DebugConfig `json:"debug,omitempty"`
+}
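+
+// A minimal sketch, for illustration only (assumes the `time` import in real
+// code): enabling conversation tracking with a custom heartbeat interval:
+//
+//	logTypes := LogTypeConversations
+//	flp := FlowCollectorFLP{
+//		LogTypes:                      &logTypes,
+//		ConversationHeartbeatInterval: &metav1.Duration{Duration: 30 * time.Second},
+//	}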
+
+const (
+	HPAStatusDisabled = "DISABLED"
+	HPAStatusEnabled  = "ENABLED"
+)
+
+type FlowCollectorHPA struct {
+	// +kubebuilder:validation:Enum:=DISABLED;ENABLED
+	// +kubebuilder:default:=DISABLED
+	// `status` describes the desired status regarding deploying a horizontal pod autoscaler.
+	// - `DISABLED` will not deploy a horizontal pod autoscaler.
+	// - `ENABLED` will deploy a horizontal pod autoscaler.
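+	//
+	// A minimal sketch, for illustration only: enabling the autoscaler with up to
+	// three replicas:
+	//
+	//	hpa := FlowCollectorHPA{Status: HPAStatusEnabled, MaxReplicas: 3}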
+	Status string `json:"status,omitempty"`
+
+	// `minReplicas` is the lower limit for the number of replicas to which the autoscaler
+	// can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the
+	// alpha feature gate HPAScaleToZero is enabled and at least one Object or External
+	// metric is configured. Scaling is active as long as at least one metric value is
+	// available.
+	// +optional
+	MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"`
+
+	// `maxReplicas` is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+	// +kubebuilder:default:=3
+	// +optional
+	MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"`
+
+	// Metrics used by the pod autoscaler
+	// +optional
+	Metrics []ascv2.MetricSpec `json:"metrics"`
+}
+
+const (
+	LokiAuthDisabled         = "DISABLED"
+	LokiAuthUseHostToken     = "HOST"
+	LokiAuthForwardUserToken = "FORWARD"
+)
+
+type LokiManualParams struct {
+	//+kubebuilder:default:="http://loki:3100/"
+	// `ingesterUrl` is the address of an existing Loki service to push the flows to. When using the Loki Operator,
+	// set it to the Loki gateway service with the `network` tenant set in path, for example
+	// https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.
+	IngesterURL string `json:"ingesterUrl,omitempty"`
+
+	//+kubebuilder:validation:optional
+	// `querierUrl` specifies the address of the Loki querier service, in case it is different from the
+	// Loki ingester URL. If empty, the `ingesterUrl` value is used (assuming that the Loki ingester
+	// and querier are in the same server). When using the Loki Operator, do not set it, since
+	// ingestion and queries use the Loki gateway.
+	QuerierURL string `json:"querierUrl,omitempty"`
+
+	//+kubebuilder:validation:optional
+	// `statusUrl` specifies the address of the Loki `/ready`, `/metrics` and `/config` endpoints, in case it is different from the
+	// Loki querier URL. If empty, the `querierUrl` value is used.
+	// This is useful to show error messages and some context in the frontend.
+	// When using the Loki Operator, set it to the Loki HTTP query frontend service, for example
+	// https://loki-query-frontend-http.netobserv.svc:3100/.
+	// The `statusTls` configuration is used when `statusUrl` is set.
+	StatusURL string `json:"statusUrl,omitempty"`
+
+	//+kubebuilder:default:="netobserv"
+	// `tenantID` is the Loki `X-Scope-OrgID` that identifies the tenant for each request.
+	// When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode.
+	TenantID string `json:"tenantID,omitempty"`
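+
+	// A minimal sketch, for illustration only, of the Loki Operator setup described
+	// above (gateway URL, `network` tenant, forwarded token):
+	//
+	//	manual := LokiManualParams{
+	//		IngesterURL: "https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network",
+	//		TenantID:    "network",
+	//		AuthToken:   LokiAuthForwardUserToken,
+	//	}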
+
+	// +kubebuilder:validation:Enum:="DISABLED";"HOST";"FORWARD"
+	//+kubebuilder:default:="DISABLED"
+	// `authToken` describes the way to get a token to authenticate to Loki.
+	// - `DISABLED` will not send any token with the request.
+	// - `FORWARD` will forward the user token for authorization.
+	// - `HOST` - deprecated (*) - will use the local pod service account to authenticate to Loki.
+	// When using the Loki Operator, this must be set to `FORWARD`.
+	AuthToken string `json:"authToken,omitempty"`
+
+	// TLS client configuration for Loki URL.
+	// +optional
+	TLS ClientTLS `json:"tls"`
+
+	// TLS client configuration for Loki status URL.
+	// +optional
+	StatusTLS ClientTLS `json:"statusTls"`
+}
+
+type LokiStack struct {
+	//+kubebuilder:default:="loki"
+	Name string `json:"name"`
+	//+kubebuilder:default:="netobserv"
+	Namespace string `json:"namespace"`
+}
+
+// FlowCollectorLoki defines the desired state for FlowCollector's Loki client.
+type FlowCollectorLoki struct {
+
+	//+kubebuilder:validation:Enum=MANUAL;LOKISTACK
+	//+kubebuilder:default:="LOKISTACK"
+	Mode string `json:"mode,omitempty"`
+
+	Manual LokiManualParams `json:"manual,omitempty"`
+
+	LokiStack LokiStack `json:"lokiStack,omitempty"`
+
+	//+kubebuilder:default:=true
+	// Enable storing flows in Loki. It is required for the OpenShift Console plugin installation.
+	Enable *bool `json:"enable,omitempty"`
+
+	//+kubebuilder:default:="1s"
+	// `batchWait` is the maximum time to wait before sending a batch.
+	BatchWait *metav1.Duration `json:"batchWait,omitempty"` // Warning: keep as pointer, else default is ignored
+
+	//+kubebuilder:validation:Minimum=1
+	//+kubebuilder:default:=102400
+	// `batchSize` is the maximum batch size (in bytes) of logs to accumulate before sending.
+	BatchSize int64 `json:"batchSize,omitempty"`
+
+	//+kubebuilder:default:="10s"
+	// `timeout` is the maximum connection / request time limit.
+	// A timeout of zero means no timeout.
+	Timeout *metav1.Duration `json:"timeout,omitempty"` // Warning: keep as pointer, else default is ignored
+
+	//+kubebuilder:default="1s"
+	// `minBackoff` is the initial backoff time for client connection between retries.
+	MinBackoff *metav1.Duration `json:"minBackoff,omitempty"` // Warning: keep as pointer, else default is ignored
+
+	//+kubebuilder:default="5s"
+	// `maxBackoff` is the maximum backoff time for client connection between retries.
+	MaxBackoff *metav1.Duration `json:"maxBackoff,omitempty"` // Warning: keep as pointer, else default is ignored
+
+	//+kubebuilder:validation:Minimum=0
+	//+kubebuilder:default:=2
+	// `maxRetries` is the maximum number of retries for client connections.
+	MaxRetries *int32 `json:"maxRetries,omitempty"`
+
+	//+kubebuilder:default:={"app":"netobserv-flowcollector"}
+	// +optional
+	// `staticLabels` is a map of common labels to set on each flow.
+	StaticLabels map[string]string `json:"staticLabels"`
+}
+
+// FlowCollectorConsolePlugin defines the desired ConsolePlugin state of FlowCollector
+type FlowCollectorConsolePlugin struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	//+kubebuilder:default:=true
+	// Enable the console plugin deployment.
+	// `spec.loki.enable` must also be `true`.
+	Enable *bool `json:"enable,omitempty"`
+
+	//+kubebuilder:default:=true
+	// `register` allows, when set to true, to automatically register the provided console plugin with the OpenShift Console operator.
+	// When set to false, you can still register it manually by editing console.operator.openshift.io/cluster with the following command:
+	// `oc patch console.operator.openshift.io cluster --type='json' -p '[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]'`
+	Register *bool `json:"register,omitempty"`
+
+	//+kubebuilder:validation:Minimum=0
+	//+kubebuilder:default:=1
+	// `replicas` defines the number of replicas (pods) to start.
+	Replicas *int32 `json:"replicas,omitempty"`
+
+	//+kubebuilder:validation:Minimum=1
+	//+kubebuilder:validation:Maximum=65535
+	//+kubebuilder:default:=9001
+	// `port` is the plugin service port. Do not use 9002, which is reserved for metrics.
+	Port int32 `json:"port,omitempty"`
+
+	//+kubebuilder:validation:Enum=IfNotPresent;Always;Never
+	//+kubebuilder:default:=IfNotPresent
+	// `imagePullPolicy` is the Kubernetes pull policy for the image defined above
+	ImagePullPolicy string `json:"imagePullPolicy,omitempty"`
+
+	//+kubebuilder:default:={requests:{memory:"50Mi",cpu:"100m"},limits:{memory:"100Mi"}}
+	// `resources` are the compute resources required by this container.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	// +optional
+	Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
+
+	//+kubebuilder:validation:Enum=trace;debug;info;warn;error;fatal;panic
+	//+kubebuilder:default:=info
+	// `logLevel` for the console plugin backend
+	LogLevel string `json:"logLevel,omitempty"`
+
+	// `autoscaler` spec of a horizontal pod autoscaler to set up for the plugin Deployment.
+	// +optional
+	Autoscaler FlowCollectorHPA `json:"autoscaler,omitempty"`
+
+	//+kubebuilder:default:={enable:true}
+	// `portNaming` defines the configuration of the port-to-service name translation
+	PortNaming ConsolePluginPortConfig `json:"portNaming,omitempty"`
+
+	//+kubebuilder:default:={{name:"Applications",filter:{"src_namespace!":"openshift-,netobserv","dst_namespace!":"openshift-,netobserv"},default:true},{name:"Infrastructure",filter:{"src_namespace":"openshift-,netobserv","dst_namespace":"openshift-,netobserv"}},{name:"Pods network",filter:{"src_kind":"Pod","dst_kind":"Pod"},default:true},{name:"Services network",filter:{"dst_kind":"Service"}}}
+	// +optional
+	// `quickFilters` configures quick filter presets for the Console plugin
+	QuickFilters []QuickFilter `json:"quickFilters"`
+}
+
+// Configuration of the port to service name translation feature of the console plugin
+type ConsolePluginPortConfig struct {
+	//+kubebuilder:default:=true
+	// Enable the console plugin port-to-service name translation
+	Enable *bool `json:"enable,omitempty"`
+
+	// `portNames` defines additional port names to use in the console,
+	// for example, `portNames: {"3100": "loki"}`.
+	// +optional
+	PortNames map[string]string `json:"portNames" yaml:"portNames"`
+}
+
+// `QuickFilter` defines preset configuration for Console's quick filters
+type QuickFilter struct {
+	// Name of the filter, as it is displayed in the Console
+	// +kubebuilder:MinLength:=1
+	Name string `json:"name"`
+	// `filter` is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a comma-separated string,
+	// for example, `filter: {"src_namespace": "namespace1,namespace2"}`.
+	// +kubebuilder:MinProperties:=1
+	Filter map[string]string `json:"filter"`
+	// `default` defines whether this filter should be active by default or not
+	// +optional
+	Default bool `json:"default,omitempty"`
+}
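+
+// A minimal sketch, for illustration only (hypothetical namespaces): a preset
+// filter matching flows coming from two given source namespaces:
+//
+//	qf := QuickFilter{
+//		Name:    "My app traffic",
+//		Filter:  map[string]string{"src_namespace": "namespace1,namespace2"},
+//		Default: true,
+//	}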
+
+// `ClusterNetworkOperatorConfig` defines the desired configuration related to the Cluster Network Operator
+type ClusterNetworkOperatorConfig struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	//+kubebuilder:default:=openshift-network-operator
+	// Namespace where the config map is going to be deployed.
+	Namespace string `json:"namespace,omitempty"`
+}
+
+// `OVNKubernetesConfig` defines the desired configuration related to the OVN-Kubernetes network provider, when Cluster Network Operator isn't installed.
+type OVNKubernetesConfig struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	//+kubebuilder:default:=ovn-kubernetes
+	// Namespace where OVN-Kubernetes pods are deployed.
+	Namespace string `json:"namespace,omitempty"`
+
+	//+kubebuilder:default:=ovnkube-node
+	// `daemonSetName` defines the name of the DaemonSet controlling the OVN-Kubernetes pods.
+	DaemonSetName string `json:"daemonSetName,omitempty"`
+
+	//+kubebuilder:default:=ovnkube-node
+	// `containerName` defines the name of the container to configure for IPFIX.
+	ContainerName string `json:"containerName,omitempty"`
+}
+
+type MountableType string
+
+const (
+	RefTypeSecret    MountableType = "secret"
+	RefTypeConfigMap MountableType = "configmap"
+)
+
+type CertificateReference struct {
+	//+kubebuilder:validation:Enum=configmap;secret
+	// Type for the certificate reference: `configmap` or `secret`
+	Type MountableType `json:"type,omitempty"`
+
+	// Name of the config map or secret containing certificates
+	Name string `json:"name,omitempty"`
+
+	// Namespace of the config map or secret containing certificates. If omitted, the namespace where NetObserv is deployed is assumed.
+	// If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+	// +optional
+	//+kubebuilder:default:=""
+	Namespace string `json:"namespace,omitempty"`
+
+	// `certFile` defines the path to the certificate file name within the config map or secret
+	CertFile string `json:"certFile,omitempty"`
+
+	// `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+	// +optional
+	CertKey string `json:"certKey,omitempty"`
+}
+
+// `ClientTLS` defines TLS client configuration
+type ClientTLS struct {
+	//+kubebuilder:default:=false
+	// Enable TLS
+	Enable bool `json:"enable,omitempty"`
+
+	//+kubebuilder:default:=false
+	// `insecureSkipVerify` allows skipping client-side verification of the server certificate.
+	// If set to true, the `caCert` field is ignored.
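+	//
+	// A minimal sketch, for illustration only (hypothetical names): one-way TLS
+	// verifying the server against a CA certificate stored in a config map:
+	//
+	//	tls := ClientTLS{
+	//		Enable: true,
+	//		CACert: CertificateReference{Type: RefTypeConfigMap, Name: "kafka-ca", CertFile: "ca.crt"},
+	//	}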
+ InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` + + // `caCert` defines the reference of the certificate for the Certificate Authority + CACert CertificateReference `json:"caCert,omitempty"` + + // `userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS) + // +optional + UserCert CertificateReference `json:"userCert,omitempty"` +} + +type SASLType string + +const ( + SASLDisabled SASLType = "DISABLED" + SASLPlain SASLType = "PLAIN" + SASLScramSHA512 SASLType = "SCRAM-SHA512" +) + +// `SASLConfig` defines SASL configuration +type SASLConfig struct { + //+kubebuilder:validation:Enum=DISABLED;PLAIN;SCRAM-SHA512 + //+kubebuilder:default:=DISABLED + // Type of SASL authentication to use, or `DISABLED` if SASL is not used + Type SASLType `json:"type,omitempty"` + + // Reference to the secret or config map containing the client ID and secret + Reference ConfigOrSecret `json:"reference,omitempty"` + + // Key for client ID within the provided `reference` + ClientIDKey string `json:"clientIDKey,omitempty"` + + // Key for client secret within the provided `reference` + ClientSecretKey string `json:"clientSecretKey,omitempty"` +} + +type ConfigOrSecret struct { + //+kubebuilder:validation:Enum=configmap;secret + // Type for the reference: "configmap" or "secret" + Type MountableType `json:"type,omitempty"` + + // Name of the config map or secret to reference + Name string `json:"name,omitempty"` + + // Namespace of the config map or secret. If omitted, assumes same namespace as where NetObserv is deployed. + // If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. + // +optional + //+kubebuilder:default:="" + Namespace string `json:"namespace,omitempty"` +} + +// `DebugConfig` allows tweaking some aspects of the internal configuration of the agent and FLP. +// They are aimed exclusively for debugging. Users setting these values do it at their own risk. +type DebugConfig struct { + // `env` allows passing custom environment variables to underlying components. Useful for passing + // some very concrete performance-tuning options, such as GOGC and GOMAXPROCS, that should not be + // publicly exposed as part of the FlowCollector descriptor, as they are only useful + // in edge debug or support scenarios. + //+optional + Env map[string]string `json:"env,omitempty"` +} + +// Add more exporter types below +type ExporterType string + +const ( + KafkaExporter ExporterType = "KAFKA" + IpfixExporter ExporterType = "IPFIX" +) + +// `FlowCollectorExporter` defines an additional exporter to send enriched flows to. +type FlowCollectorExporter struct { + // `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported (*). + // +unionDiscriminator + // +kubebuilder:validation:Enum:="KAFKA";"IPFIX" + // +kubebuilder:validation:Required + Type ExporterType `json:"type"` + + // Kafka configuration, such as the address and topic, to send enriched flows to. + // +optional + Kafka FlowCollectorKafka `json:"kafka,omitempty"` + + // IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. Unsupported (*). 
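+	//
+	// A minimal sketch, for illustration only (hypothetical receiver): an IPFIX
+	// exporter entry targeting the standard IPFIX port over UDP:
+	//
+	//	exporter := FlowCollectorExporter{
+	//		Type:  IpfixExporter,
+	//		IPFIX: FlowCollectorIPFIXReceiver{TargetHost: "192.0.2.10", TargetPort: 4739, Transport: "UDP"},
+	//	}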
+	// +optional
+	IPFIX FlowCollectorIPFIXReceiver `json:"ipfix,omitempty"`
+}
+
+// `FlowCollectorStatus` defines the observed state of FlowCollector
+type FlowCollectorStatus struct {
+	// Important: Run "make" to regenerate code after modifying this file
+
+	// `conditions` represent the latest available observations of an object's state
+	Conditions []metav1.Condition `json:"conditions"`
+
+	// Namespace where console plugin and flowlogs-pipeline have been deployed.
+	Namespace string `json:"namespace,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:scope=Cluster
+// +kubebuilder:printcolumn:name="Agent",type="string",JSONPath=`.spec.agent.type`
+// +kubebuilder:printcolumn:name="Sampling (EBPF)",type="string",JSONPath=`.spec.agent.ebpf.sampling`
+// +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel`
+// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[*].reason"
+// +kubebuilder:storageversion
+// `FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments.
+type FlowCollector struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   FlowCollectorSpec   `json:"spec,omitempty"`
+	Status FlowCollectorStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// `FlowCollectorList` contains a list of FlowCollector
+type FlowCollectorList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []FlowCollector `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&FlowCollector{}, &FlowCollectorList{})
+}
diff --git a/api/v1beta2/flowcollector_webhook.go b/api/v1beta2/flowcollector_webhook.go
new file mode 100644
index 000000000..c3e3a71e8
--- /dev/null
+++ b/api/v1beta2/flowcollector_webhook.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2021.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import ctrl "sigs.k8s.io/controller-runtime"
+
+// +kubebuilder:webhook:verbs=create;update,path=/validate-netobserv-io-v1beta2-flowcollector,mutating=false,failurePolicy=fail,groups=netobserv.io,resources=flowcollectors,versions=v1beta2,name=flowcollectorconversionwebhook.netobserv.io,sideEffects=None,admissionReviewVersions=v1
+func (r *FlowCollector) SetupWebhookWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewWebhookManagedBy(mgr).
+		For(r).
+		Complete()
+}
+
+// Hub marks this version as a conversion hub.
+// All the other versions need to provide converters from/to this version.
+// https://book.kubebuilder.io/multiversion-tutorial/conversion-concepts.html
+func (*FlowCollector) Hub()     {}
+func (*FlowCollectorList) Hub() {}
diff --git a/api/v1beta2/groupversion_info.go b/api/v1beta2/groupversion_info.go
new file mode 100644
index 000000000..9fa9dec5d
--- /dev/null
+++ b/api/v1beta2/groupversion_info.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2021.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta2 contains API Schema definitions for the flows v1beta2 API group +// +kubebuilder:object:generate=true +// +groupName=flows.netobserv.io +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "flows.netobserv.io", Version: "v1beta2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..13113ccd3 --- /dev/null +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,698 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "k8s.io/api/autoscaling/v2" + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateReference) DeepCopyInto(out *CertificateReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateReference. +func (in *CertificateReference) DeepCopy() *CertificateReference { + if in == nil { + return nil + } + out := new(CertificateReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientTLS) DeepCopyInto(out *ClientTLS) { + *out = *in + out.CACert = in.CACert + out.UserCert = in.UserCert +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientTLS. +func (in *ClientTLS) DeepCopy() *ClientTLS { + if in == nil { + return nil + } + out := new(ClientTLS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterNetworkOperatorConfig) DeepCopyInto(out *ClusterNetworkOperatorConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkOperatorConfig. +func (in *ClusterNetworkOperatorConfig) DeepCopy() *ClusterNetworkOperatorConfig { + if in == nil { + return nil + } + out := new(ClusterNetworkOperatorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigOrSecret) DeepCopyInto(out *ConfigOrSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigOrSecret. +func (in *ConfigOrSecret) DeepCopy() *ConfigOrSecret { + if in == nil { + return nil + } + out := new(ConfigOrSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginPortConfig) DeepCopyInto(out *ConsolePluginPortConfig) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.PortNames != nil { + in, out := &in.PortNames, &out.PortNames + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginPortConfig. +func (in *ConsolePluginPortConfig) DeepCopy() *ConsolePluginPortConfig { + if in == nil { + return nil + } + out := new(ConsolePluginPortConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DebugConfig) DeepCopyInto(out *DebugConfig) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DebugConfig. +func (in *DebugConfig) DeepCopy() *DebugConfig { + if in == nil { + return nil + } + out := new(DebugConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FLPMetrics) DeepCopyInto(out *FLPMetrics) { + *out = *in + in.Server.DeepCopyInto(&out.Server) + if in.IgnoreTags != nil { + in, out := &in.IgnoreTags, &out.IgnoreTags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DisableAlerts != nil { + in, out := &in.DisableAlerts, &out.DisableAlerts + *out = make([]FLPAlert, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FLPMetrics. +func (in *FLPMetrics) DeepCopy() *FLPMetrics { + if in == nil { + return nil + } + out := new(FLPMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollector) DeepCopyInto(out *FlowCollector) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollector. 
+func (in *FlowCollector) DeepCopy() *FlowCollector { + if in == nil { + return nil + } + out := new(FlowCollector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowCollector) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorAgent) DeepCopyInto(out *FlowCollectorAgent) { + *out = *in + out.IPFIX = in.IPFIX + in.EBPF.DeepCopyInto(&out.EBPF) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorAgent. +func (in *FlowCollectorAgent) DeepCopy() *FlowCollectorAgent { + if in == nil { + return nil + } + out := new(FlowCollectorAgent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorConsolePlugin) DeepCopyInto(out *FlowCollectorConsolePlugin) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Register != nil { + in, out := &in.Register, &out.Register + *out = new(bool) + **out = **in + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Resources.DeepCopyInto(&out.Resources) + in.Autoscaler.DeepCopyInto(&out.Autoscaler) + in.PortNaming.DeepCopyInto(&out.PortNaming) + if in.QuickFilters != nil { + in, out := &in.QuickFilters, &out.QuickFilters + *out = make([]QuickFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorConsolePlugin. +func (in *FlowCollectorConsolePlugin) DeepCopy() *FlowCollectorConsolePlugin { + if in == nil { + return nil + } + out := new(FlowCollectorConsolePlugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorEBPF) DeepCopyInto(out *FlowCollectorEBPF) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) + if in.Sampling != nil { + in, out := &in.Sampling, &out.Sampling + *out = new(int32) + **out = **in + } + if in.Interfaces != nil { + in, out := &in.Interfaces, &out.Interfaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludeInterfaces != nil { + in, out := &in.ExcludeInterfaces, &out.ExcludeInterfaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Debug.DeepCopyInto(&out.Debug) + if in.EnablePktDrop != nil { + in, out := &in.EnablePktDrop, &out.EnablePktDrop + *out = new(bool) + **out = **in + } + if in.EnableDNSTracking != nil { + in, out := &in.EnableDNSTracking, &out.EnableDNSTracking + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorEBPF. +func (in *FlowCollectorEBPF) DeepCopy() *FlowCollectorEBPF { + if in == nil { + return nil + } + out := new(FlowCollectorEBPF) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowCollectorExporter) DeepCopyInto(out *FlowCollectorExporter) { + *out = *in + out.Kafka = in.Kafka + out.IPFIX = in.IPFIX +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorExporter. +func (in *FlowCollectorExporter) DeepCopy() *FlowCollectorExporter { + if in == nil { + return nil + } + out := new(FlowCollectorExporter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorFLP) DeepCopyInto(out *FlowCollectorFLP) { + *out = *in + in.Metrics.DeepCopyInto(&out.Metrics) + in.Resources.DeepCopyInto(&out.Resources) + if in.EnableKubeProbes != nil { + in, out := &in.EnableKubeProbes, &out.EnableKubeProbes + *out = new(bool) + **out = **in + } + if in.DropUnusedFields != nil { + in, out := &in.DropUnusedFields, &out.DropUnusedFields + *out = new(bool) + **out = **in + } + if in.KafkaConsumerReplicas != nil { + in, out := &in.KafkaConsumerReplicas, &out.KafkaConsumerReplicas + *out = new(int32) + **out = **in + } + in.KafkaConsumerAutoscaler.DeepCopyInto(&out.KafkaConsumerAutoscaler) + if in.LogTypes != nil { + in, out := &in.LogTypes, &out.LogTypes + *out = new(string) + **out = **in + } + if in.ConversationHeartbeatInterval != nil { + in, out := &in.ConversationHeartbeatInterval, &out.ConversationHeartbeatInterval + *out = new(v1.Duration) + **out = **in + } + if in.ConversationEndTimeout != nil { + in, out := &in.ConversationEndTimeout, &out.ConversationEndTimeout + *out = new(v1.Duration) + **out = **in + } + if in.ConversationTerminatingTimeout != nil { + in, out := &in.ConversationTerminatingTimeout, &out.ConversationTerminatingTimeout + *out = new(v1.Duration) + **out = **in + } + in.Debug.DeepCopyInto(&out.Debug) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorFLP. +func (in *FlowCollectorFLP) DeepCopy() *FlowCollectorFLP { + if in == nil { + return nil + } + out := new(FlowCollectorFLP) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorHPA) DeepCopyInto(out *FlowCollectorHPA) { + *out = *in + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]v2.MetricSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorHPA. +func (in *FlowCollectorHPA) DeepCopy() *FlowCollectorHPA { + if in == nil { + return nil + } + out := new(FlowCollectorHPA) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorIPFIX) DeepCopyInto(out *FlowCollectorIPFIX) { + *out = *in + out.ClusterNetworkOperator = in.ClusterNetworkOperator + out.OVNKubernetes = in.OVNKubernetes +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIPFIX. 
+func (in *FlowCollectorIPFIX) DeepCopy() *FlowCollectorIPFIX { + if in == nil { + return nil + } + out := new(FlowCollectorIPFIX) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorIPFIXReceiver) DeepCopyInto(out *FlowCollectorIPFIXReceiver) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIPFIXReceiver. +func (in *FlowCollectorIPFIXReceiver) DeepCopy() *FlowCollectorIPFIXReceiver { + if in == nil { + return nil + } + out := new(FlowCollectorIPFIXReceiver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorKafka) DeepCopyInto(out *FlowCollectorKafka) { + *out = *in + out.TLS = in.TLS + out.SASL = in.SASL +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorKafka. +func (in *FlowCollectorKafka) DeepCopy() *FlowCollectorKafka { + if in == nil { + return nil + } + out := new(FlowCollectorKafka) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorList) DeepCopyInto(out *FlowCollectorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlowCollector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorList. +func (in *FlowCollectorList) DeepCopy() *FlowCollectorList { + if in == nil { + return nil + } + out := new(FlowCollectorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowCollectorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorLoki) DeepCopyInto(out *FlowCollectorLoki) { + *out = *in + out.Manual = in.Manual + out.LokiStack = in.LokiStack + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.BatchWait != nil { + in, out := &in.BatchWait, &out.BatchWait + *out = new(v1.Duration) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + if in.MinBackoff != nil { + in, out := &in.MinBackoff, &out.MinBackoff + *out = new(v1.Duration) + **out = **in + } + if in.MaxBackoff != nil { + in, out := &in.MaxBackoff, &out.MaxBackoff + *out = new(v1.Duration) + **out = **in + } + if in.MaxRetries != nil { + in, out := &in.MaxRetries, &out.MaxRetries + *out = new(int32) + **out = **in + } + if in.StaticLabels != nil { + in, out := &in.StaticLabels, &out.StaticLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorLoki. 
+func (in *FlowCollectorLoki) DeepCopy() *FlowCollectorLoki { + if in == nil { + return nil + } + out := new(FlowCollectorLoki) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorSpec) DeepCopyInto(out *FlowCollectorSpec) { + *out = *in + in.Agent.DeepCopyInto(&out.Agent) + in.Processor.DeepCopyInto(&out.Processor) + in.Loki.DeepCopyInto(&out.Loki) + in.ConsolePlugin.DeepCopyInto(&out.ConsolePlugin) + out.Kafka = in.Kafka + if in.Exporters != nil { + in, out := &in.Exporters, &out.Exporters + *out = make([]*FlowCollectorExporter, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(FlowCollectorExporter) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorSpec. +func (in *FlowCollectorSpec) DeepCopy() *FlowCollectorSpec { + if in == nil { + return nil + } + out := new(FlowCollectorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorStatus) DeepCopyInto(out *FlowCollectorStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorStatus. +func (in *FlowCollectorStatus) DeepCopy() *FlowCollectorStatus { + if in == nil { + return nil + } + out := new(FlowCollectorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LokiManualParams) DeepCopyInto(out *LokiManualParams) { + *out = *in + out.TLS = in.TLS + out.StatusTLS = in.StatusTLS +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiManualParams. +func (in *LokiManualParams) DeepCopy() *LokiManualParams { + if in == nil { + return nil + } + out := new(LokiManualParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LokiStack) DeepCopyInto(out *LokiStack) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStack. +func (in *LokiStack) DeepCopy() *LokiStack { + if in == nil { + return nil + } + out := new(LokiStack) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsServerConfig) DeepCopyInto(out *MetricsServerConfig) { + *out = *in + in.TLS.DeepCopyInto(&out.TLS) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsServerConfig. +func (in *MetricsServerConfig) DeepCopy() *MetricsServerConfig { + if in == nil { + return nil + } + out := new(MetricsServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OVNKubernetesConfig. +func (in *OVNKubernetesConfig) DeepCopy() *OVNKubernetesConfig { + if in == nil { + return nil + } + out := new(OVNKubernetesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuickFilter) DeepCopyInto(out *QuickFilter) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickFilter. +func (in *QuickFilter) DeepCopy() *QuickFilter { + if in == nil { + return nil + } + out := new(QuickFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SASLConfig) DeepCopyInto(out *SASLConfig) { + *out = *in + out.Reference = in.Reference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SASLConfig. +func (in *SASLConfig) DeepCopy() *SASLConfig { + if in == nil { + return nil + } + out := new(SASLConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerTLS) DeepCopyInto(out *ServerTLS) { + *out = *in + if in.Provided != nil { + in, out := &in.Provided, &out.Provided + *out = new(CertificateReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerTLS. +func (in *ServerTLS) DeepCopy() *ServerTLS { + if in == nil { + return nil + } + out := new(ServerTLS) + in.DeepCopyInto(out) + return out +} diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index 665ef772f..e8b80bffa 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -4917,6 +4917,2356 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.agent.type + name: Agent + type: string + - jsonPath: .spec.agent.ebpf.sampling + name: Sampling (EBPF) + type: string + - jsonPath: .spec.deploymentModel + name: Deployment Model + type: string + - jsonPath: .status.conditions[*].reason + name: Status + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: FlowCollector is the schema for the network flows collection + API, which pilots and configures the underlying deployments. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'FlowCollectorSpec defines the desired state of FlowCollector. +
+
*: the mention of "unsupported", or "deprecated" + for a feature throughout this document means that this feature is not + officially supported by Red Hat. It may have been, for instance, contributed + by the community and accepted without a formal agreement for maintenance. + The product maintainers may provide some support for these features + as a best effort only.' + properties: + agent: + default: + type: EBPF + description: agent for flows extraction. + properties: + ebpf: + description: ebpf describes the settings related to the eBPF-based + flow reporter when the "agent.type" property is set to "EBPF". + properties: + cacheActiveTimeout: + default: 5s + description: cacheActiveTimeout is the max period during which + the reporter will aggregate flows before sending. Increasing + `cacheMaxFlows` and `cacheActiveTimeout` can decrease the + network traffic overhead and the CPU load, however you can + expect higher memory consumption and an increased latency + in the flow collection. + pattern: ^\d+(ns|ms|s|m)?$ + type: string + cacheMaxFlows: + default: 100000 + description: cacheMaxFlows is the max number of flows in an + aggregate; when reached, the reporter sends the flows. Increasing + `cacheMaxFlows` and `cacheActiveTimeout` can decrease the + network traffic overhead and the CPU load, however you can + expect higher memory consumption and an increased latency + in the flow collection. + format: int32 + minimum: 1 + type: integer + debug: + description: Debug allows setting some aspects of the internal + configuration of the eBPF agent. This section is aimed exclusively + for debugging and fine-grained performance optimizations + (for example GOGC, GOMAXPROCS env vars). Users setting its + values do it at their own risk. + properties: + env: + additionalProperties: + type: string + description: env allows passing custom environment variables + to the NetObserv Agent. Useful for passing some very + concrete performance-tuning options (such as GOGC, GOMAXPROCS) + that shouldn't be publicly exposed as part of the FlowCollector + descriptor, as they are only useful in edge debug and + support scenarios. + type: object + type: object + excludeInterfaces: + default: + - lo + description: excludeInterfaces contains the interface names + that will be excluded from flow tracing. If an entry is + enclosed by slashes (such as `/br-/`), it will match as + regular expression, otherwise it will be matched as a case-sensitive + string. + items: + type: string + type: array + imagePullPolicy: + default: IfNotPresent + description: imagePullPolicy is the Kubernetes pull policy + for the image defined above + enum: + - IfNotPresent + - Always + - Never + type: string + interfaces: + description: interfaces contains the interface names from + where flows will be collected. If empty, the agent will + fetch all the interfaces in the system, excepting the ones + listed in ExcludeInterfaces. If an entry is enclosed by + slashes (such as `/br-/`), it will match as regular expression, + otherwise it will be matched as a case-sensitive string. + items: + type: string + type: array + kafkaBatchSize: + default: 10485760 + description: 'kafkaBatchSize limits the maximum size of a + request in bytes before being sent to a partition. Ignored + when not using Kafka. Default: 10MB.' 
+ type: integer + logLevel: + default: info + description: logLevel defines the log level for the NetObserv + eBPF Agent + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + privileged: + description: 'privileged mode for the eBPF Agent container. + In general this setting can be ignored or set to false: + in that case, the operator will set granular capabilities + (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, + to enable its correct operation. If for some reason these + capabilities cannot be set (for example old kernel version + not knowing CAP_BPF) then you can turn on this mode for + more global privileges.' + type: boolean + resources: + default: + limits: + memory: 800Mi + requests: + cpu: 100m + memory: 50Mi + description: 'resources are the compute resources required + by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + sampling: + default: 50 + description: sampling rate of the flow reporter. 100 means + one flow on 100 is sent. 0 or 1 means all flows are sampled. + format: int32 + minimum: 0 + type: integer + type: object + ipfix: + description: ipfix - deprecated (*) - describes the settings + related to the IPFIX-based flow reporter when the "agent.type" + property is set to "IPFIX". + properties: + cacheActiveTimeout: + default: 20s + description: cacheActiveTimeout is the max period during which + the reporter will aggregate flows before sending + pattern: ^\d+(ns|ms|s|m)?$ + type: string + cacheMaxFlows: + default: 400 + description: cacheMaxFlows is the max number of flows in an + aggregate; when reached, the reporter sends the flows + format: int32 + minimum: 0 + type: integer + clusterNetworkOperator: + description: clusterNetworkOperator defines the settings related + to the OpenShift Cluster Network Operator, when available. + properties: + namespace: + default: openshift-network-operator + description: namespace where the config map is going + to be deployed. + type: string + type: object + forceSampleAll: + default: false + description: forceSampleAll allows disabling sampling in the + IPFIX-based flow reporter. It is not recommended to sample + all the traffic with IPFIX, as it might generate cluster + instability. If you REALLY want to do that, set this flag + to true. Use at your own risk. When it is set to true, the + value of "sampling" is ignored. 
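
For orientation, the eBPF agent fields documented above combine into a FlowCollector custom resource as sketched below. This fragment is illustrative only (it is not part of the patch) and the values are examples, not recommendations:

    apiVersion: flows.netobserv.io/v1beta2
    kind: FlowCollector
    metadata:
      name: cluster
    spec:
      agent:
        type: EBPF
        ebpf:
          sampling: 50                        # 1 flow out of 50 is reported
          cacheMaxFlows: 100000               # flush when this many flows are aggregated...
          cacheActiveTimeout: 5s              # ...or after this period, whichever comes first
          privileged: false                   # granular capabilities instead of full privileges
          excludeInterfaces: ["lo", "/br-/"]  # exact name, and regular expression between slashes
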
+ type: boolean + ovnKubernetes: + description: ovnKubernetes defines the settings of the OVN-Kubernetes + CNI, when available. This configuration is used when using + OVN's IPFIX exports, without OpenShift. When using OpenShift, + refer to the `clusterNetworkOperator` property instead. + properties: + containerName: + default: ovnkube-node + description: containerName defines the name of the container + to configure for IPFIX. + type: string + daemonSetName: + default: ovnkube-node + description: daemonSetName defines the name of the DaemonSet + controlling the OVN-Kubernetes pods. + type: string + namespace: + default: ovn-kubernetes + description: namespace where OVN-Kubernetes pods are deployed. + type: string + type: object + sampling: + default: 400 + description: sampling is the sampling rate on the reporter. + 100 means one flow on 100 is sent. To ensure cluster stability, + it is not possible to set a value below 2. If you really + want to sample every packet, which might impact the cluster + stability, refer to "forceSampleAll". Alternatively, you + can use the eBPF Agent instead of IPFIX. + format: int32 + minimum: 2 + type: integer + type: object + type: + default: EBPF + description: type selects the flows tracing agent. Possible values + are "EBPF" (default) to use NetObserv eBPF agent, "IPFIX" - + deprecated (*) - to use the legacy IPFIX collector. "EBPF" + is recommended in most cases as it offers better performances + and should work regardless of the CNI installed on the cluster. + "IPFIX" works with OVN-Kubernetes CNI (other CNIs could work + if they support exporting IPFIX, but they would require manual + configuration). + enum: + - EBPF + - IPFIX + type: string + required: + - type + type: object + consolePlugin: + description: consolePlugin defines the settings related to the OpenShift + Console plugin, when available. + properties: + autoscaler: + description: autoscaler spec of a horizontal pod autoscaler to + set up for the plugin Deployment. + properties: + maxReplicas: + default: 3 + description: maxReplicas is the upper limit for the number + of pods that can be set by the autoscaler; cannot be smaller + than MinReplicas. + format: int32 + type: integer + metrics: + description: metrics used by the pod autoscaler + items: + description: MetricSpec specifies how to scale based on + a single metric (only `type` and one other matching field + should be set at once). + properties: + containerResource: + description: containerResource refers to a resource + metric (such as those specified in requests and limits) + known to Kubernetes describing a single container + in each pod of the current scale target (e.g. CPU + or memory). Such metrics are built in to Kubernetes, + and have special scaling options on top of those available + to normal per-pod metrics using the "pods" source. + This is an alpha feature and can be enabled by the + HPAContainerMetrics feature flag. + properties: + container: + description: container is the name of the container + in the pods of the scaling target + type: string + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. 
Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - container + - name + - target + type: object + external: + description: external refers to a global metric that + is not associated with any Kubernetes object. It allows + autoscaling based on information coming from components + running outside of cluster (for example length of + queue in cloud messaging service, or QPS from loadbalancer + running outside of cluster). + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. 
Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + object: + description: object refers to a metric describing a + single kubernetes object (for example, hits-per-second + on an Ingress object). + properties: + describedObject: + description: describedObject specifies the descriptions + of a object,such as kind,name apiVersion + properties: + apiVersion: + description: API version of the referent + type: string + kind: + description: 'Kind of the referent; More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' + type: string + name: + description: 'Name of the referent; More info: + http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + required: + - kind + - name + type: object + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. 
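
The autoscaler `metrics` list embeds the upstream autoscaling/v2 MetricSpec as-is, so any metric source accepted by a HorizontalPodAutoscaler works here. A common minimal setup, sketched for illustration (not part of the patch), is a single Resource metric targeting average CPU utilization:

    consolePlugin:
      autoscaler:
        status: ENABLED
        minReplicas: 1
        maxReplicas: 3
        metrics:
        - type: Resource
          resource:
            name: cpu
            target:
              type: Utilization
              averageUtilization: 50      # scale out when average CPU exceeds 50% of requests
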
+ type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - describedObject + - metric + - target + type: object + pods: + description: pods refers to a metric describing each + pod in the current scale target (for example, transactions-processed-per-second). The + values will be averaged together before being compared + to the target value. + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. 
+ type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + resource: + description: resource refers to a resource metric (such + as those specified in requests and limits) known to + Kubernetes describing each pod in the current scale + target (e.g. CPU or memory). Such metrics are built + in to Kubernetes, and have special scaling options + on top of those available to normal per-pod metrics + using the "pods" source. + properties: + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - name + - target + type: object + type: + description: 'type is the type of metric source. It + should be one of "ContainerResource", "External", + "Object", "Pods" or "Resource", each mapping to a + matching field in the object. Note: "ContainerResource" + type is available on when the feature-gate HPAContainerMetrics + is enabled' + type: string + required: + - type + type: object + type: array + minReplicas: + description: minReplicas is the lower limit for the number + of replicas to which the autoscaler can scale down. It + defaults to 1 pod. 
minReplicas is allowed to be 0 if the + alpha feature gate HPAScaleToZero is enabled and at least + one Object or External metric is configured. Scaling is + active as long as at least one metric value is available. + format: int32 + type: integer + status: + default: DISABLED + description: Status describe the desired status regarding + deploying an horizontal pod autoscaler DISABLED will not + deploy an horizontal pod autoscaler ENABLED will deploy + an horizontal pod autoscaler + enum: + - DISABLED + - ENABLED + type: string + type: object + imagePullPolicy: + default: IfNotPresent + description: imagePullPolicy is the Kubernetes pull policy for + the image defined above + enum: + - IfNotPresent + - Always + - Never + type: string + logLevel: + default: info + description: logLevel for the console plugin backend + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + port: + default: 9001 + description: port is the plugin service port. Do not use 9002, + which is reserved for metrics. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + portNaming: + default: + enable: true + description: portNaming defines the configuration of the port-to-service + name translation + properties: + enable: + default: true + description: enable the console plugin port-to-service name + translation + type: boolean + portNames: + additionalProperties: + type: string + description: 'portNames defines additional port names to use + in the console. Example: portNames: {"3100": "loki"}' + type: object + type: object + quickFilters: + default: + - default: true + filter: + dst_namespace!: openshift-,netobserv + src_namespace!: openshift-,netobserv + name: Applications + - filter: + dst_namespace: openshift-,netobserv + src_namespace: openshift-,netobserv + name: Infrastructure + - default: true + filter: + dst_kind: Pod + src_kind: Pod + name: Pods network + - filter: + dst_kind: Service + name: Services network + description: quickFilters configures quick filter presets for + the Console plugin + items: + description: QuickFilter defines preset configuration for Console's + quick filters + properties: + default: + description: default defines whether this filter should + be active by default or not + type: boolean + filter: + additionalProperties: + type: string + description: 'filter is a set of keys and values to be set + when this filter is selected. Each key can relate to a + list of values using a coma-separated string. Example: + filter: {"src_namespace": "namespace1,namespace2"}' + type: object + name: + description: name of the filter, that will be displayed + in Console + type: string + required: + - filter + - name + type: object + type: array + register: + default: true + description: 'register allows, when set to true, to automatically + register the provided console plugin with the OpenShift Console + operator. When set to false, you can still register it manually + by editing console.operator.openshift.io/cluster. E.g: oc patch + console.operator.openshift.io cluster --type=''json'' -p ''[{"op": + "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''' + type: boolean + replicas: + default: 1 + description: replicas defines the number of replicas (pods) to + start. + format: int32 + minimum: 0 + type: integer + resources: + default: + limits: + memory: 100Mi + requests: + cpu: 100m + memory: 50Mi + description: 'resources, in terms of compute resources, required + by this container. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + deploymentModel: + default: DIRECT + description: deploymentModel defines the desired type of deployment + for flow processing. Possible values are "DIRECT" (default) to make + the flow processor listening directly from the agents, or "KAFKA" + to make flows sent to a Kafka pipeline before consumption by the + processor. Kafka can provide better scalability, resiliency and + high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka). + enum: + - DIRECT + - KAFKA + type: string + exporters: + description: exporters define additional optional exporters for custom + consumption or storage. + items: + description: FlowCollectorExporter defines an additional exporter + to send enriched flows to. + properties: + ipfix: + description: IPFIX configuration, such as the IP address and + port to send enriched IPFIX flows to. Unsupported (*). + properties: + targetHost: + default: "" + description: address of the ipfix external receiver + type: string + targetPort: + description: port for the ipfix external receiver + type: integer + transport: + description: Transport protocol (tcp/udp) to be used for + the IPFIX connection, defaults to tcp + enum: + - TCP + - UDP + type: string + required: + - targetHost + - targetPort + type: object + kafka: + description: kafka configuration, such as the address and topic, + to send enriched flows to. + properties: + address: + default: "" + description: address of the Kafka server + type: string + tls: + description: tls client configuration. When using TLS, verify + that the address matches the Kafka port used for TLS, + generally 9093. Note that, when eBPF agents are used, + Kafka certificate needs to be copied in the agent namespace + (by default it's netobserv-privileged). + properties: + caCert: + description: caCert defines the reference of the certificate + for the Certificate Authority + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or + secret. Omit when the key is not necessary. 
+ type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: enable TLS + type: boolean + insecureSkipVerify: + default: false + description: insecureSkipVerify allows skipping client-side + verification of the server certificate If set to true, + CACert field will be ignored + type: boolean + userCert: + description: userCert defines the user certificate reference, + used for mTLS (you can ignore it when using regular, + one-way TLS) + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or + secret. Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: object + topic: + default: "" + description: kafka topic to use. It must exist, NetObserv + will not create it. + type: string + required: + - address + - topic + type: object + type: + description: type selects the type of exporters. The available + options are "KAFKA" and "IPFIX". "IPFIX" is unsupported + (*). + enum: + - KAFKA + - IPFIX + type: string + required: + - type + type: object + type: array + kafka: + description: kafka configuration, allowing to use Kafka as a broker + as part of the flow collection pipeline. Available when the "spec.deploymentModel" + is "KAFKA". + properties: + address: + default: "" + description: address of the Kafka server + type: string + tls: + description: tls client configuration. When using TLS, verify + that the address matches the Kafka port used for TLS, generally + 9093. Note that, when eBPF agents are used, Kafka certificate + needs to be copied in the agent namespace (by default it's netobserv-privileged). + properties: + caCert: + description: caCert defines the reference of the certificate + for the Certificate Authority + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. 
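
As a usage sketch for the exporters list documented above (the addresses and names below are placeholders, not taken from the patch):

    exporters:
    - type: KAFKA
      kafka:
        address: kafka-cluster-kafka-bootstrap.netobserv:9092
        topic: netobserv-flows-export       # must already exist; NetObserv does not create it
    - type: IPFIX                           # unsupported (*), shown for completeness
      ipfix:
        targetHost: 192.0.2.10              # documentation address (RFC 5737)
        targetPort: 4739
        transport: UDP
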
If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string + type: + description: 'type for the certificate reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: enable TLS + type: boolean + insecureSkipVerify: + default: false + description: insecureSkipVerify allows skipping client-side + verification of the server certificate If set to true, CACert + field will be ignored + type: boolean + userCert: + description: userCert defines the user certificate reference, + used for mTLS (you can ignore it when using regular, one-way + TLS) + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string + type: + description: 'type for the certificate reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: object + topic: + default: "" + description: kafka topic to use. It must exist, NetObserv will + not create it. + type: string + required: + - address + - topic + type: object + loki: + description: loki, the flow store, client settings. + properties: + batchSize: + default: 102400 + description: batchSize is max batch size (in bytes) of logs to + accumulate before sending. + format: int64 + minimum: 1 + type: integer + batchWait: + default: 1s + description: batchWait is max time to wait before sending a batch. + type: string + manual: + properties: + authToken: + default: DISABLED + description: AuthToken describe the way to get a token to + authenticate to Loki. DISABLED will not send any token with + the request. HOST - deprecated (*) - will use the + local pod service account to authenticate to Loki. FORWARD + will forward the user token for authorization. When using + the Loki Operator, this should be set to `FORWARD`. + enum: + - DISABLED + - HOST + - FORWARD + type: string + querierUrl: + description: querierURL specifies the address of the Loki + querier service, in case it is different from the Loki ingester + URL. If empty, the URL value will be used (assuming that + the Loki ingester and querier are in the same server). When + using the Loki Operator, do not set it, since ingestion + and queries use the Loki gateway. + type: string + statusTls: + description: tls client configuration for loki status URL. + properties: + caCert: + description: caCert defines the reference of the certificate + for the Certificate Authority + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary. 
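
When `spec.deploymentModel` is set to KAFKA, the broker settings documented above take effect; a minimal sketch with a placeholder address:

    spec:
      deploymentModel: KAFKA
      kafka:
        address: kafka-cluster-kafka-bootstrap.netobserv:9092
        topic: network-flows
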
+ type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + will be copied so that it can be mounted as required. + type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: enable TLS + type: boolean + insecureSkipVerify: + default: false + description: insecureSkipVerify allows skipping client-side + verification of the server certificate If set to true, + CACert field will be ignored + type: boolean + userCert: + description: userCert defines the user certificate reference, + used for mTLS (you can ignore it when using regular, + one-way TLS) + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + will be copied so that it can be mounted as required. + type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: object + statusUrl: + description: statusURL specifies the address of the Loki /ready + /metrics /config endpoints, in case it is different from + the Loki querier URL. If empty, the QuerierURL value will + be used. This is useful to show error messages and some + context in the frontend. When using the Loki Operator, set + it to the Loki HTTP query frontend service, for example + https://loki-query-frontend-http.netobserv.svc:3100/. statusTLS + configuration will be used when statusUrl is set. + type: string + tenantID: + default: netobserv + description: tenantID is the Loki X-Scope-OrgID that identifies + the tenant for each request. When using the Loki Operator, + set it to `network`, which corresponds to a special tenant + mode. + type: string + tls: + description: tls client configuration for loki URL. + properties: + caCert: + description: caCert defines the reference of the certificate + for the Certificate Authority + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + will be copied so that it can be mounted as required. 
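
Putting the manual Loki parameters together: with the new `mode: MANUAL` discriminator, a Loki Operator setup follows the hints given in the individual field descriptions. A sketch only — the hostnames depend on the actual LokiStack deployment, and the CA config map name is a placeholder:

    loki:
      mode: MANUAL
      manual:
        authToken: FORWARD                  # forward the user token; recommended with the Loki Operator
        tenantID: network                   # special tenant mode of the Loki Operator
        url: https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network
        statusUrl: https://loki-query-frontend-http.netobserv.svc:3100/
        tls:
          enable: true
          caCert:
            type: configmap
            name: loki-gateway-ca-bundle    # placeholder name
            certFile: service-ca.crt
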
+ type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: enable TLS + type: boolean + insecureSkipVerify: + default: false + description: insecureSkipVerify allows skipping client-side + verification of the server certificate If set to true, + CACert field will be ignored + type: boolean + userCert: + description: userCert defines the user certificate reference, + used for mTLS (you can ignore it when using regular, + one-way TLS) + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + will be copied so that it can be mounted as required. + type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: object + url: + default: http://loki:3100/ + description: url is the address of an existing Loki service + to push the flows to. When using the Loki Operator, set + it to the Loki gateway service with the `network` tenant + set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network. + type: string + type: object + maxBackoff: + default: 5s + description: maxBackoff is the maximum backoff time for client + connection between retries. + type: string + maxRetries: + default: 2 + description: maxRetries is the maximum number of retries for client + connections. + format: int32 + minimum: 0 + type: integer + minBackoff: + default: 1s + description: minBackoff is the initial backoff time for client + connection between retries. + type: string + mode: + enum: + - MANUAL + type: string + staticLabels: + additionalProperties: + type: string + default: + app: netobserv-flowcollector + description: staticLabels is a map of common labels to set on + each flow. + type: object + timeout: + default: 10s + description: timeout is the maximum time connection / request + limit. A Timeout of zero means no timeout. + type: string + type: object + namespace: + description: namespace where NetObserv pods are deployed. If empty, + the namespace of the operator is going to be used. + type: string + processor: + description: processor defines the settings of the component that + receives the flows from the agent, enriches them, and forwards them + to the Loki persistence layer. 
+ properties: + conversationEndTimeout: + default: 10s + description: conversation end timeout is the duration of time + to wait from the last flow log to end a conversation + type: string + conversationHeartbeatInterval: + default: 30s + description: conversation heartbeat interval is the duration of + time to wait between heartbeat reports of a conversation + type: string + conversationTerminatingTimeout: + default: 5s + description: conversation terminating timeout is the duration + of time to wait from detected FIN flag to end a connection + type: string + debug: + description: Debug allows setting some aspects of the internal + configuration of the flow processor. This section is aimed exclusively + for debugging and fine-grained performance optimizations (for + example GOGC, GOMAXPROCS env vars). Users setting its values + do it at their own risk. + properties: + env: + additionalProperties: + type: string + description: env allows passing custom environment variables + to the NetObserv Agent. Useful for passing some very concrete + performance-tuning options (such as GOGC, GOMAXPROCS) that + shouldn't be publicly exposed as part of the FlowCollector + descriptor, as they are only useful in edge debug and support + scenarios. + type: object + type: object + dropUnusedFields: + default: true + description: dropUnusedFields allows, when set to true, to drop + fields that are known to be unused by OVS, in order to save + storage space. + type: boolean + enableKubeProbes: + default: true + description: enableKubeProbes is a flag to enable or disable Kubernetes + liveness and readiness probes + type: boolean + healthPort: + default: 8080 + description: healthPort is a collector HTTP port in the Pod that + exposes the health check API + format: int32 + maximum: 65535 + minimum: 1 + type: integer + imagePullPolicy: + default: IfNotPresent + description: imagePullPolicy is the Kubernetes pull policy for + the image defined above + enum: + - IfNotPresent + - Always + - Never + type: string + kafkaConsumerAutoscaler: + description: kafkaConsumerAutoscaler spec of a horizontal pod + autoscaler to set up for flowlogs-pipeline-transformer, which + consumes Kafka messages. This setting is ignored when Kafka + is disabled. + properties: + maxReplicas: + default: 3 + description: maxReplicas is the upper limit for the number + of pods that can be set by the autoscaler; cannot be smaller + than MinReplicas. + format: int32 + type: integer + metrics: + description: metrics used by the pod autoscaler + items: + description: MetricSpec specifies how to scale based on + a single metric (only `type` and one other matching field + should be set at once). + properties: + containerResource: + description: containerResource refers to a resource + metric (such as those specified in requests and limits) + known to Kubernetes describing a single container + in each pod of the current scale target (e.g. CPU + or memory). Such metrics are built in to Kubernetes, + and have special scaling options on top of those available + to normal per-pod metrics using the "pods" source. + This is an alpha feature and can be enabled by the + HPAContainerMetrics feature flag. + properties: + container: + description: container is the name of the container + in the pods of the scaling target + type: string + name: + description: name is the name of the resource in + question. 
+ type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - container + - name + - target + type: object + external: + description: external refers to a global metric that + is not associated with any Kubernetes object. It allows + autoscaling based on information coming from components + running outside of cluster (for example length of + queue in cloud messaging service, or QPS from loadbalancer + running outside of cluster). + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. 
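+                              # Example: a metric selector combining matchLabels
+                              # and matchExpressions, per the fields above; the
+                              # label keys and values are placeholders.
+                              #   selector:
+                              #     matchLabels:
+                              #       queue: flows
+                              #     matchExpressions:
+                              #       - key: topic
+                              #         operator: In
+                              #         values: ["network-flows"]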
+ type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + object: + description: object refers to a metric describing a + single kubernetes object (for example, hits-per-second + on an Ingress object). + properties: + describedObject: + description: describedObject specifies the descriptions + of a object,such as kind,name apiVersion + properties: + apiVersion: + description: API version of the referent + type: string + kind: + description: 'Kind of the referent; More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' + type: string + name: + description: 'Name of the referent; More info: + http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + required: + - kind + - name + type: object + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - describedObject + - metric + - target + type: object + pods: + description: pods refers to a metric describing each + pod in the current scale target (for example, transactions-processed-per-second). The + values will be averaged together before being compared + to the target value. + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. 
+ type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + resource: + description: resource refers to a resource metric (such + as those specified in requests and limits) known to + Kubernetes describing each pod in the current scale + target (e.g. CPU or memory). Such metrics are built + in to Kubernetes, and have special scaling options + on top of those available to normal per-pod metrics + using the "pods" source. + properties: + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - name + - target + type: object + type: + description: 'type is the type of metric source. It + should be one of "ContainerResource", "External", + "Object", "Pods" or "Resource", each mapping to a + matching field in the object. Note: "ContainerResource" + type is available on when the feature-gate HPAContainerMetrics + is enabled' + type: string + required: + - type + type: object + type: array + minReplicas: + description: minReplicas is the lower limit for the number + of replicas to which the autoscaler can scale down. It + defaults to 1 pod. 
minReplicas is allowed to be 0 if the + alpha feature gate HPAScaleToZero is enabled and at least + one Object or External metric is configured. Scaling is + active as long as at least one metric value is available. + format: int32 + type: integer + status: + default: DISABLED + description: Status describe the desired status regarding + deploying an horizontal pod autoscaler DISABLED will not + deploy an horizontal pod autoscaler ENABLED will deploy + an horizontal pod autoscaler + enum: + - DISABLED + - ENABLED + type: string + type: object + kafkaConsumerBatchSize: + default: 10485760 + description: 'kafkaConsumerBatchSize indicates to the broker the + maximum batch size, in bytes, that the consumer will accept. + Ignored when not using Kafka. Default: 10MB.' + type: integer + kafkaConsumerQueueCapacity: + default: 1000 + description: kafkaConsumerQueueCapacity defines the capacity of + the internal message queue used in the Kafka consumer client. + Ignored when not using Kafka. + type: integer + kafkaConsumerReplicas: + default: 3 + description: kafkaConsumerReplicas defines the number of replicas + (pods) to start for flowlogs-pipeline-transformer, which consumes + Kafka messages. This setting is ignored when Kafka is disabled. + format: int32 + minimum: 0 + type: integer + logLevel: + default: info + description: logLevel of the collector runtime + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + logTypes: + default: FLOWS + description: logTypes defines the desired record types to generate. + Possible values are "FLOWS" (default) to export flowLogs, "CONVERSATIONS" + to generate newConnection, heartbeat, endConnection events, + "ENDED_CONVERSATIONS" to generate only endConnection events + or "ALL" to generate both flow logs and conversations events + enum: + - FLOWS + - CONVERSATIONS + - ENDED_CONVERSATIONS + - ALL + type: string + metrics: + description: Metrics define the processor configuration regarding + metrics + properties: + disableAlerts: + description: 'disableAlerts is a list of alerts that should + be disabled. Possible values are: `NetObservNoFlows`, which + is triggered when no flows are being observed for a certain + period. `NetObservLokiError`, which is triggered when flows + are being dropped due to Loki errors.' + items: + description: 'Name of a processor alert. Possible values + are: `NetObservNoFlows`, which is triggered when no flows + are being observed for a certain period. `NetObservLokiError`, + which is triggered when flows are being dropped due to + Loki errors.' + enum: + - NetObservNoFlows + - NetObservLokiError + type: string + type: array + ignoreTags: + default: + - egress + - packets + description: 'ignoreTags is a list of tags to specify which + metrics to ignore. Each metric is associated with a list + of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions + . Available tags are: egress, ingress, flows, bytes, packets, + namespaces, nodes, workloads' + items: + type: string + type: array + server: + description: metricsServer endpoint configuration for Prometheus + scraper + properties: + port: + default: 9102 + description: the prometheus HTTP port + format: int32 + maximum: 65535 + minimum: 1 + type: integer + tls: + description: TLS configuration. + properties: + provided: + description: TLS configuration. 
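+                    # Example: a metrics endpoint serving TLS of type PROVIDED
+                    # from a user-supplied secret; the secret name (flp-metrics)
+                    # and key file names are placeholders.
+                    #   tls:
+                    #     type: PROVIDED
+                    #     provided:
+                    #       type: secret
+                    #       name: flp-metrics
+                    #       certFile: tls.crt
+                    #       certKey: tls.key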
+ properties: + certFile: + description: certFile defines the path to the + certificate file name within the config map + or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map + or secret. Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret + containing certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes + same namespace as where NetObserv is deployed. + If the namespace is different, the config map + or the secret will be copied so that it can + be mounted as required. + type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: + default: DISABLED + description: Select the type of TLS configuration + "DISABLED" (default) to not configure TLS for the + endpoint, "PROVIDED" to manually provide cert file + and a key file, and "AUTO" to use OpenShift auto + generated certificate using annotations + enum: + - DISABLED + - PROVIDED + - AUTO + type: string + type: object + type: object + type: object + port: + default: 2055 + description: 'port of the flow collector (host port) By conventions, + some value are not authorized port must not be below 1024 and + must not equal this values: 4789,6081,500, and 4500' + format: int32 + maximum: 65535 + minimum: 1025 + type: integer + profilePort: + description: profilePort allows setting up a Go pprof profiler + listening to this port + format: int32 + maximum: 65535 + minimum: 0 + type: integer + resources: + default: + limits: + memory: 800Mi + requests: + cpu: 100m + memory: 100Mi + description: 'resources are the compute resources required by + this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + required: + - agent + - deploymentModel + type: object + status: + description: FlowCollectorStatus defines the observed state of FlowCollector + properties: + conditions: + description: conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. 
For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: namespace where console plugin and flowlogs-pipeline + have been deployed. + type: string + required: + - conditions + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index 058536189..9130aa0e1 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -386,6 +386,9 @@ spec: kind: FlowCollector name: flowcollectors.flows.netobserv.io version: v1beta1 + - kind: FlowCollector + name: flowcollectors.flows.netobserv.io + version: v1beta2 description: |- NetObserv Operator is an OpenShift / Kubernetes operator for network observability. It deploys a monitoring pipeline to collect and enrich network flows. 
These flows can be produced by the NetObserv eBPF agent, or by any device or CNI able to export flows in IPFIX format, such as OVN-Kubernetes. diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index 46fed698f..e4ffc60e1 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -3693,7 +3693,7 @@ spec: - topic type: object loki: - description: Loki, the flow store, client settings. + description: loki, the flow store, client settings. properties: authToken: default: DISABLED @@ -4904,6 +4904,2489 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.agent.type + name: Agent + type: string + - jsonPath: .spec.agent.ebpf.sampling + name: Sampling (EBPF) + type: string + - jsonPath: .spec.deploymentModel + name: Deployment Model + type: string + - jsonPath: .status.conditions[*].reason + name: Status + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: '`FlowCollector` is the schema for the network flows collection + API, which pilots and configures the underlying deployments.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Defines the desired state of the FlowCollector resource. +

*: the mention of "unsupported", or "deprecated" + for a feature throughout this document means that this feature is not + officially supported by Red Hat. It might have been, for instance, contributed + by the community and accepted without a formal agreement for maintenance. + The product maintainers might provide some support for these features + as a best effort only.' + properties: + agent: + description: Agent configuration for flows extraction. + properties: + ebpf: + description: '`ebpf` describes the settings related to the eBPF-based + flow reporter when `spec.agent.type` is set to `EBPF`.' + properties: + cacheActiveTimeout: + default: 5s + description: '`cacheActiveTimeout` is the max period during + which the reporter will aggregate flows before sending. + Increasing `cacheMaxFlows` and `cacheActiveTimeout` can + decrease the network traffic overhead and the CPU load, + however you can expect higher memory consumption and an + increased latency in the flow collection.' + pattern: ^\d+(ns|ms|s|m)?$ + type: string + cacheMaxFlows: + default: 100000 + description: '`cacheMaxFlows` is the max number of flows in + an aggregate; when reached, the reporter sends the flows. + Increasing `cacheMaxFlows` and `cacheActiveTimeout` can + decrease the network traffic overhead and the CPU load, + however you can expect higher memory consumption and an + increased latency in the flow collection.' + format: int32 + minimum: 1 + type: integer + debug: + description: '`debug` allows setting some aspects of the internal + configuration of the eBPF agent. This section is aimed exclusively + for debugging and fine-grained performance optimizations, + such as GOGC and GOMAXPROCS env vars. Users setting its + values do it at their own risk.' + properties: + env: + additionalProperties: + type: string + description: '`env` allows passing custom environment + variables to underlying components. Useful for passing + some very concrete performance-tuning options, such + as GOGC and GOMAXPROCS, that should not be publicly + exposed as part of the FlowCollector descriptor, as + they are only useful in edge debug or support scenarios.' + type: object + type: object + enableDNSTracking: + default: false + description: Enable the DNS tracking feature. This feature + requires mounting the kernel debug filesystem hence the + eBPF pod has to run as privileged. If the spec.agent.eBPF.privileged + parameter is not set, an error is reported. + type: boolean + enablePktDrop: + default: false + description: Enable the Packets drop flows logging feature. + This feature requires mounting the kernel debug filesystem, + so the eBPF pod has to run as privileged. If the spec.agent.eBPF.privileged + parameter is not set, an error is reported. + type: boolean + excludeInterfaces: + default: + - lo + description: '`excludeInterfaces` contains the interface names + that will be excluded from flow tracing. An entry is enclosed + by slashes, such as `/br-/`, is matched as a regular expression. + Otherwise it is matched as a case-sensitive string.' + items: + type: string + type: array + imagePullPolicy: + default: IfNotPresent + description: '`imagePullPolicy` is the Kubernetes pull policy + for the image defined above' + enum: + - IfNotPresent + - Always + - Never + type: string + interfaces: + description: '`interfaces` contains the interface names from + where flows will be collected. If empty, the agent will + fetch all the interfaces in the system, excepting the ones + listed in ExcludeInterfaces. 
An entry is enclosed by slashes, + such as `/br-/`, is matched as a regular expression. Otherwise + it is matched as a case-sensitive string.' + items: + type: string + type: array + kafkaBatchSize: + default: 10485760 + description: '`kafkaBatchSize` limits the maximum size of + a request in bytes before being sent to a partition. Ignored + when not using Kafka. Default: 10MB.' + type: integer + logLevel: + default: info + description: '`logLevel` defines the log level for the NetObserv + eBPF Agent' + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + privileged: + description: 'Privileged mode for the eBPF Agent container. + In general this setting can be ignored or set to false: + in that case, the operator will set granular capabilities + (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, + to enable its correct operation. If for some reason these + capabilities cannot be set, such as if an old kernel version + not knowing CAP_BPF is in use, then you can turn on this + mode for more global privileges.' + type: boolean + resources: + default: + limits: + memory: 800Mi + requests: + cpu: 100m + memory: 50Mi + description: '`resources` are the compute resources required + by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + sampling: + default: 50 + description: Sampling rate of the flow reporter. 100 means + one flow on 100 is sent. 0 or 1 means all flows are sampled. + format: int32 + minimum: 0 + type: integer + type: object + ipfix: + description: '`ipfix` - deprecated (*) - describes the + settings related to the IPFIX-based flow reporter when `spec.agent.type` + is set to `IPFIX`.' + properties: + cacheActiveTimeout: + default: 20s + description: '`cacheActiveTimeout` is the max period during + which the reporter will aggregate flows before sending' + pattern: ^\d+(ns|ms|s|m)?$ + type: string + cacheMaxFlows: + default: 400 + description: '`cacheMaxFlows` is the max number of flows in + an aggregate; when reached, the reporter sends the flows' + format: int32 + minimum: 0 + type: integer + clusterNetworkOperator: + description: '`clusterNetworkOperator` defines the settings + related to the OpenShift Cluster Network Operator, when + available.' + properties: + namespace: + default: openshift-network-operator + description: Namespace where the config map is going + to be deployed. 
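+                  # Example: an eBPF agent sampling 1 flow in 50 with DNS tracking
+                  # enabled; per the field descriptions above, enableDNSTracking
+                  # requires the agent to run privileged.
+                  #   agent:
+                  #     type: EBPF
+                  #     ebpf:
+                  #       sampling: 50
+                  #       privileged: true
+                  #       enableDNSTracking: true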
+ type: string + type: object + forceSampleAll: + default: false + description: '`forceSampleAll` allows disabling sampling in + the IPFIX-based flow reporter. It is not recommended to + sample all the traffic with IPFIX, as it might generate + cluster instability. If you REALLY want to do that, set + this flag to true. Use at your own risk. When it is set + to true, the value of `sampling` is ignored.' + type: boolean + ovnKubernetes: + description: '`ovnKubernetes` defines the settings of the + OVN-Kubernetes CNI, when available. This configuration is + used when using OVN''s IPFIX exports, without OpenShift. + When using OpenShift, refer to the `clusterNetworkOperator` + property instead.' + properties: + containerName: + default: ovnkube-node + description: '`containerName` defines the name of the + container to configure for IPFIX.' + type: string + daemonSetName: + default: ovnkube-node + description: '`daemonSetName` defines the name of the + DaemonSet controlling the OVN-Kubernetes pods.' + type: string + namespace: + default: ovn-kubernetes + description: Namespace where OVN-Kubernetes pods are deployed. + type: string + type: object + sampling: + default: 400 + description: '`sampling` is the sampling rate on the reporter. + 100 means one flow on 100 is sent. To ensure cluster stability, + it is not possible to set a value below 2. If you really + want to sample every packet, which might impact the cluster + stability, refer to `forceSampleAll`. Alternatively, you + can use the eBPF Agent instead of IPFIX.' + format: int32 + minimum: 2 + type: integer + type: object + type: + default: EBPF + description: '`type` selects the flows tracing agent. Possible + values are:
- `EBPF` (default) to use the NetObserv eBPF agent.
+ - `IPFIX` - deprecated (*) - to use the legacy IPFIX + collector.
`EBPF` is recommended as it offers better performances + and should work regardless of the CNI installed on the cluster. + `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work + if they support exporting IPFIX, but they would require manual + configuration).' + enum: + - EBPF + - IPFIX + type: string + type: object + consolePlugin: + description: '`consolePlugin` defines the settings related to the + OpenShift Console plugin, when available.' + properties: + autoscaler: + description: '`autoscaler` spec of a horizontal pod autoscaler + to set up for the plugin Deployment.' + properties: + maxReplicas: + default: 3 + description: '`maxReplicas` is the upper limit for the number + of pods that can be set by the autoscaler; cannot be smaller + than MinReplicas.' + format: int32 + type: integer + metrics: + description: Metrics used by the pod autoscaler + items: + description: MetricSpec specifies how to scale based on + a single metric (only `type` and one other matching field + should be set at once). + properties: + containerResource: + description: containerResource refers to a resource + metric (such as those specified in requests and limits) + known to Kubernetes describing a single container + in each pod of the current scale target (e.g. CPU + or memory). Such metrics are built in to Kubernetes, + and have special scaling options on top of those available + to normal per-pod metrics using the "pods" source. + This is an alpha feature and can be enabled by the + HPAContainerMetrics feature flag. + properties: + container: + description: container is the name of the container + in the pods of the scaling target + type: string + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - container + - name + - target + type: object + external: + description: external refers to a global metric that + is not associated with any Kubernetes object. It allows + autoscaling based on information coming from components + running outside of cluster (for example length of + queue in cloud messaging service, or QPS from loadbalancer + running outside of cluster). 
+ properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + object: + description: object refers to a metric describing a + single kubernetes object (for example, hits-per-second + on an Ingress object). 
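+                    # Example: a typical autoscaler metric for the plugin
+                    # Deployment, scaling on average CPU utilization (the 80%
+                    # target is illustrative).
+                    #   metrics:
+                    #     - type: Resource
+                    #       resource:
+                    #         name: cpu
+                    #         target:
+                    #           type: Utilization
+                    #           averageUtilization: 80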
+ properties: + describedObject: + description: describedObject specifies the descriptions + of a object,such as kind,name apiVersion + properties: + apiVersion: + description: API version of the referent + type: string + kind: + description: 'Kind of the referent; More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' + type: string + name: + description: 'Name of the referent; More info: + http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + required: + - kind + - name + type: object + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - describedObject + - metric + - target + type: object + pods: + description: pods refers to a metric describing each + pod in the current scale target (for example, transactions-processed-per-second). The + values will be averaged together before being compared + to the target value. + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + resource: + description: resource refers to a resource metric (such + as those specified in requests and limits) known to + Kubernetes describing each pod in the current scale + target (e.g. CPU or memory). Such metrics are built + in to Kubernetes, and have special scaling options + on top of those available to normal per-pod metrics + using the "pods" source. + properties: + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - name + - target + type: object + type: + description: 'type is the type of metric source. It + should be one of "ContainerResource", "External", + "Object", "Pods" or "Resource", each mapping to a + matching field in the object. Note: "ContainerResource" + type is available on when the feature-gate HPAContainerMetrics + is enabled' + type: string + required: + - type + type: object + type: array + minReplicas: + description: '`minReplicas` is the lower limit for the number + of replicas to which the autoscaler can scale down. It defaults + to 1 pod. minReplicas is allowed to be 0 if the alpha feature + gate HPAScaleToZero is enabled and at least one Object or + External metric is configured. Scaling is active as long + as at least one metric value is available.' + format: int32 + type: integer + status: + default: DISABLED + description: '`status` describes the desired status regarding + deploying an horizontal pod autoscaler.
- `DISABLED` + will not deploy a horizontal pod autoscaler.
- `ENABLED` + will deploy a horizontal pod autoscaler.
' + enum: + - DISABLED + - ENABLED + type: string + type: object + enable: + default: true + description: enable the console plugin deployment. spec.Loki.enable + must also be true + type: boolean + imagePullPolicy: + default: IfNotPresent + description: '`imagePullPolicy` is the Kubernetes pull policy + for the image defined above' + enum: + - IfNotPresent + - Always + - Never + type: string + logLevel: + default: info + description: '`logLevel` for the console plugin backend' + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + port: + default: 9001 + description: '`port` is the plugin service port. Do not use 9002, + which is reserved for metrics.' + format: int32 + maximum: 65535 + minimum: 1 + type: integer + portNaming: + default: + enable: true + description: '`portNaming` defines the configuration of the port-to-service + name translation' + properties: + enable: + default: true + description: Enable the console plugin port-to-service name + translation + type: boolean + portNames: + additionalProperties: + type: string + description: '`portNames` defines additional port names to + use in the console, for example, `portNames: {"3100": "loki"}`.' + type: object + type: object + quickFilters: + default: + - default: true + filter: + dst_namespace!: openshift-,netobserv + src_namespace!: openshift-,netobserv + name: Applications + - filter: + dst_namespace: openshift-,netobserv + src_namespace: openshift-,netobserv + name: Infrastructure + - default: true + filter: + dst_kind: Pod + src_kind: Pod + name: Pods network + - filter: + dst_kind: Service + name: Services network + description: '`quickFilters` configures quick filter presets for + the Console plugin' + items: + description: '`QuickFilter` defines preset configuration for + Console''s quick filters' + properties: + default: + description: '`default` defines whether this filter should + be active by default or not' + type: boolean + filter: + additionalProperties: + type: string + description: '`filter` is a set of keys and values to be + set when this filter is selected. Each key can relate + to a list of values using a coma-separated string, for + example, `filter: {"src_namespace": "namespace1,namespace2"}`.' + type: object + name: + description: Name of the filter, that will be displayed + in Console + type: string + required: + - filter + - name + type: object + type: array + register: + default: true + description: '`register` allows, when set to true, to automatically + register the provided console plugin with the OpenShift Console + operator. When set to false, you can still register it manually + by editing console.operator.openshift.io/cluster with the following + command: `oc patch console.operator.openshift.io cluster --type=''json'' + -p ''[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''`' + type: boolean + replicas: + default: 1 + description: '`replicas` defines the number of replicas (pods) + to start.' + format: int32 + minimum: 0 + type: integer + resources: + default: + limits: + memory: 100Mi + requests: + cpu: 100m + memory: 50Mi + description: '`resources`, in terms of compute resources, required + by this container. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + deploymentModel: + default: DIRECT + description: '`deploymentModel` defines the desired type of deployment + for flow processing. Possible values are:
- `DIRECT` (default) + to make the flow processor listen directly from the agents.
+ - `KAFKA` to send flows to a Kafka pipeline before consumption + by the processor.
Kafka can provide better scalability, resiliency, + and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).' + enum: + - DIRECT + - KAFKA + type: string + exporters: + description: '`exporters` define additional optional exporters for + custom consumption or storage.' + items: + description: '`FlowCollectorExporter` defines an additional exporter + to send enriched flows to.' + properties: + ipfix: + description: IPFIX configuration, such as the IP address and + port to send enriched IPFIX flows to. Unsupported (*). + properties: + targetHost: + default: "" + description: Address of the IPFIX external receiver + type: string + targetPort: + description: Port for the IPFIX external receiver + type: integer + transport: + description: Transport protocol (`TCP` or `UDP`) to be used + for the IPFIX connection, defaults to `TCP`. + enum: + - TCP + - UDP + type: string + required: + - targetHost + - targetPort + type: object + kafka: + description: Kafka configuration, such as the address and topic, + to send enriched flows to. + properties: + address: + default: "" + description: Address of the Kafka server + type: string + sasl: + description: SASL authentication configuration. Unsupported + (*) + properties: + clientIDKey: + description: Key for client ID within the provided `reference` + type: string + clientSecretKey: + description: Key for client secret within the provided + `reference` + type: string + reference: + description: Reference to the secret or config map containing + the client ID and secret + properties: + name: + description: Name of the config map or secret to + reference + type: string + namespace: + default: "" + description: Namespace of the config map or secret. + If omitted, assumes same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret will be copied so that + it can be mounted as required. + type: string + type: + description: 'Type for the reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: + default: DISABLED + description: Type of SASL authentication to use, or + `DISABLED` if SASL is not used + enum: + - DISABLED + - PLAIN + - SCRAM-SHA512 + type: string + type: object + tls: + description: TLS client configuration. When using TLS, verify + that the address matches the Kafka port used for TLS, + generally 9093. + properties: + caCert: + description: '`caCert` defines the reference of the + certificate for the Certificate Authority' + properties: + certFile: + description: '`certFile` defines the path to the + certificate file name within the config map or + secret' + type: string + certKey: + description: '`certKey` defines the path to the + certificate private key file name within the config + map or secret. Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, assumes the + same namespace as where NetObserv is deployed. + If the namespace is different, the config map + or the secret will be copied so that it can be + mounted as required. 
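+              # Example: one `exporters` entry sending enriched flows to Kafka;
+              # the address and topic are placeholders, and per the description
+              # above the topic must already exist.
+              #   exporters:
+              #     - type: KAFKA
+              #       kafka:
+              #         address: kafka-cluster-kafka-bootstrap.netobserv:9092
+              #         topic: netobserv-flows-export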
+ type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: Enable TLS + type: boolean + insecureSkipVerify: + default: false + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to + true, the `caCert` field is ignored.' + type: boolean + userCert: + description: '`userCert` defines the user certificate + reference and is used for mTLS (you can ignore it + when using one-way TLS)' + properties: + certFile: + description: '`certFile` defines the path to the + certificate file name within the config map or + secret' + type: string + certKey: + description: '`certKey` defines the path to the + certificate private key file name within the config + map or secret. Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, assumes the + same namespace as where NetObserv is deployed. + If the namespace is different, the config map + or the secret will be copied so that it can be + mounted as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + type: object + topic: + default: "" + description: Kafka topic to use. It must exist, NetObserv + will not create it. + type: string + required: + - address + - topic + type: object + type: + description: '`type` selects the type of exporters. The available + options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported + (*).' + enum: + - KAFKA + - IPFIX + type: string + required: + - type + type: object + type: array + kafka: + description: Kafka configuration, allowing to use Kafka as a broker + as part of the flow collection pipeline. Available when the `spec.deploymentModel` + is `KAFKA`. + properties: + address: + default: "" + description: Address of the Kafka server + type: string + sasl: + description: SASL authentication configuration. Unsupported + (*) + properties: + clientIDKey: + description: Key for client ID within the provided `reference` + type: string + clientSecretKey: + description: Key for client secret within the provided `reference` + type: string + reference: + description: Reference to the secret or config map containing + the client ID and secret + properties: + name: + description: Name of the config map or secret to reference + type: string + namespace: + default: "" + description: Namespace of the config map or secret. If + omitted, assumes same namespace as where NetObserv is + deployed. If the namespace is different, the config + map or the secret will be copied so that it can be mounted + as required. + type: string + type: + description: 'Type for the reference: "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: + default: DISABLED + description: Type of SASL authentication to use, or `DISABLED` + if SASL is not used + enum: + - DISABLED + - PLAIN + - SCRAM-SHA512 + type: string + type: object + tls: + description: TLS client configuration. When using TLS, verify + that the address matches the Kafka port used for TLS, generally + 9093. 
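+ # Illustration only, not part of the generated schema: a typical client
+ # TLS block for this `kafka.tls` section, mirroring the v1beta2 samples
+ # further below in this patch, which assume a Strimzi-style CA secret
+ # named "kafka-cluster-cluster-ca-cert":
+ #   tls:
+ #     enable: true
+ #     caCert:
+ #       type: secret
+ #       name: kafka-cluster-cluster-ca-cert
+ #       certFile: ca.crt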
+ properties: + caCert: + description: '`caCert` defines the reference of the certificate + for the Certificate Authority' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + certificates. If omitted, assumes the same namespace + as where NetObserv is deployed. If the namespace is + different, the config map or the secret will be copied + so that it can be mounted as required. + type: string + type: + description: 'Type for the certificate reference: `configmap` + or `secret`' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: Enable TLS + type: boolean + insecureSkipVerify: + default: false + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to true, + the `caCert` field is ignored.' + type: boolean + userCert: + description: '`userCert` defines the user certificate reference + and is used for mTLS (you can ignore it when using one-way + TLS)' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + certificates. If omitted, assumes the same namespace + as where NetObserv is deployed. If the namespace is + different, the config map or the secret will be copied + so that it can be mounted as required. + type: string + type: + description: 'Type for the certificate reference: `configmap` + or `secret`' + enum: + - configmap + - secret + type: string + type: object + type: object + topic: + default: "" + description: Kafka topic to use. It must exist, NetObserv will + not create it. + type: string + required: + - address + - topic + type: object + loki: + description: loki, the flow store, client settings. + properties: + batchSize: + default: 102400 + description: '`batchSize` is the maximum batch size (in bytes) + of logs to accumulate before sending.' + format: int64 + minimum: 1 + type: integer + batchWait: + default: 1s + description: '`batchWait` is the maximum time to wait before sending + a batch.' + type: string + enable: + default: true + description: enable storing flows to Loki. It is required for + the OpenShift Console plugin installation. + type: boolean + lokiStack: + properties: + name: + default: loki + type: string + namespace: + default: netobserv + type: string + required: + - name + - namespace + type: object + manual: + properties: + authToken: + default: DISABLED + description: '`authToken` describes the way to get a token + to authenticate to Loki.
- `DISABLED` will not send + any token with the request.<br>
- `FORWARD` will forward + the user token for authorization.<br>
- `HOST` - deprecated + (*) - will use the local pod service account to authenticate + to Loki.<br>
When using the Loki Operator, this must be + set to `FORWARD`.' + enum: + - DISABLED + - HOST + - FORWARD + type: string + ingesterUrl: + default: http://loki:3100/ + description: '`ingesterUrl` is the address of an existing + Loki service to push the flows to. When using the Loki Operator, + set it to the Loki gateway service with the `network` tenant + set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.' + type: string + querierUrl: + description: '`querierURL` specifies the address of the Loki + querier service, in case it is different from the Loki ingester + URL. If empty, the URL value will be used (assuming that + the Loki ingester and querier are in the same server). When + using the Loki Operator, do not set it, since ingestion + and queries use the Loki gateway.' + type: string + statusTls: + description: TLS client configuration for Loki status URL. + properties: + caCert: + description: '`caCert` defines the reference of the certificate + for the Certificate Authority' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, assumes the + same namespace as where NetObserv is deployed. If + the namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: Enable TLS + type: boolean + insecureSkipVerify: + default: false + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to true, + the `caCert` field is ignored.' + type: boolean + userCert: + description: '`userCert` defines the user certificate + reference and is used for mTLS (you can ignore it when + using one-way TLS)' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, assumes the + same namespace as where NetObserv is deployed. If + the namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + type: object + statusUrl: + description: '`statusURL` specifies the address of the Loki + `/ready`, `/metrics` and `/config` endpoints, in case it + is different from the Loki querier URL. If empty, the `querierURL` + value will be used. 
This is useful to show error messages + and some context in the frontend. When using the Loki Operator, + set it to the Loki HTTP query frontend service, for example + https://loki-query-frontend-http.netobserv.svc:3100/. `statusTLS` + configuration will be used when `statusUrl` is set.' + type: string + tenantID: + default: netobserv + description: '`tenantID` is the Loki `X-Scope-OrgID` that + identifies the tenant for each request. When using the Loki + Operator, set it to `network`, which corresponds to a special + tenant mode.' + type: string + tls: + description: TLS client configuration for Loki URL. + properties: + caCert: + description: '`caCert` defines the reference of the certificate + for the Certificate Authority' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, assumes the + same namespace as where NetObserv is deployed. If + the namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: Enable TLS + type: boolean + insecureSkipVerify: + default: false + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to true, + the `caCert` field is ignored.' + type: boolean + userCert: + description: '`userCert` defines the user certificate + reference and is used for mTLS (you can ignore it when + using one-way TLS)' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, assumes the + same namespace as where NetObserv is deployed. If + the namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + type: object + type: object + maxBackoff: + default: 5s + description: '`maxBackoff` is the maximum backoff time for client + connection between retries.' + type: string + maxRetries: + default: 2 + description: '`maxRetries` is the maximum number of retries for + client connections.' + format: int32 + minimum: 0 + type: integer + minBackoff: + default: 1s + description: '`minBackoff` is the initial backoff time for client + connection between retries.' 
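+ # Illustration only, not part of the generated schema: the Loki client
+ # settings above combine as in the v1beta2 samples further below, e.g.:
+ #   loki:
+ #     mode: MANUAL
+ #     manual:
+ #       ingesterUrl: 'http://loki.netobserv.svc:3100/'
+ #     batchWait: 1s
+ #     batchSize: 10485760
+ #     minBackoff: 1s
+ #     maxBackoff: 5s
+ #     maxRetries: 2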
+ type: string + mode: + default: LOKISTACK + enum: + - MANUAL + - LOKISTACK + type: string + staticLabels: + additionalProperties: + type: string + default: + app: netobserv-flowcollector + description: '`staticLabels` is a map of common labels to set + on each flow.' + type: object + timeout: + default: 10s + description: '`timeout` is the maximum time connection / request + limit. A timeout of zero means no timeout.' + type: string + type: object + namespace: + default: netobserv + description: Namespace where NetObserv pods are deployed. If empty, + the namespace of the operator is going to be used. + type: string + processor: + description: '`processor` defines the settings of the component that + receives the flows from the agent, enriches them, generates metrics, + and forwards them to the Loki persistence layer and/or any available + exporter.' + properties: + conversationEndTimeout: + default: 10s + description: '`conversationEndTimeout` is the time to wait after + a network flow is received, to consider the conversation ended. + This delay is ignored when a FIN packet is collected for TCP + flows (see `conversationTerminatingTimeout` instead).' + type: string + conversationHeartbeatInterval: + default: 30s + description: '`conversationHeartbeatInterval` is the time to wait + between "tick" events of a conversation' + type: string + conversationTerminatingTimeout: + default: 5s + description: '`conversationTerminatingTimeout` is the time to + wait from detected FIN flag to end a conversation. Only relevant + for TCP flows.' + type: string + debug: + description: '`debug` allows setting some aspects of the internal + configuration of the flow processor. This section is aimed exclusively + for debugging and fine-grained performance optimizations, such + as GOGC and GOMAXPROCS env vars. Users setting its values do + it at their own risk.' + properties: + env: + additionalProperties: + type: string + description: '`env` allows passing custom environment variables + to underlying components. Useful for passing some very concrete + performance-tuning options, such as GOGC and GOMAXPROCS, + that should not be publicly exposed as part of the FlowCollector + descriptor, as they are only useful in edge debug or support + scenarios.' + type: object + type: object + dropUnusedFields: + default: true + description: '`dropUnusedFields` allows, when set to true, to + drop fields that are known to be unused by OVS, to save storage + space.' + type: boolean + enableKubeProbes: + default: true + description: '`enableKubeProbes` is a flag to enable or disable + Kubernetes liveness and readiness probes' + type: boolean + healthPort: + default: 8080 + description: '`healthPort` is a collector HTTP port in the Pod + that exposes the health check API' + format: int32 + maximum: 65535 + minimum: 1 + type: integer + imagePullPolicy: + default: IfNotPresent + description: '`imagePullPolicy` is the Kubernetes pull policy + for the image defined above' + enum: + - IfNotPresent + - Always + - Never + type: string + kafkaConsumerAutoscaler: + description: '`kafkaConsumerAutoscaler` is the spec of a horizontal + pod autoscaler to set up for `flowlogs-pipeline-transformer`, + which consumes Kafka messages. This setting is ignored when + Kafka is disabled.' + properties: + maxReplicas: + default: 3 + description: '`maxReplicas` is the upper limit for the number + of pods that can be set by the autoscaler; cannot be smaller + than MinReplicas.' 
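+ # Illustration only, not part of the generated schema: a CPU-based
+ # autoscaler for the Kafka consumer could follow the same shape as the
+ # consolePlugin autoscaler used in the samples further below:
+ #   kafkaConsumerAutoscaler:
+ #     minReplicas: 1
+ #     maxReplicas: 3
+ #     metrics:
+ #     - type: Resource
+ #       resource:
+ #         name: cpu
+ #         target:
+ #           type: Utilization
+ #           averageUtilization: 50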
+ format: int32 + type: integer + metrics: + description: Metrics used by the pod autoscaler + items: + description: MetricSpec specifies how to scale based on + a single metric (only `type` and one other matching field + should be set at once). + properties: + containerResource: + description: containerResource refers to a resource + metric (such as those specified in requests and limits) + known to Kubernetes describing a single container + in each pod of the current scale target (e.g. CPU + or memory). Such metrics are built in to Kubernetes, + and have special scaling options on top of those available + to normal per-pod metrics using the "pods" source. + This is an alpha feature and can be enabled by the + HPAContainerMetrics feature flag. + properties: + container: + description: container is the name of the container + in the pods of the scaling target + type: string + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - container + - name + - target + type: object + external: + description: external refers to a global metric that + is not associated with any Kubernetes object. It allows + autoscaling based on information coming from components + running outside of cluster (for example length of + queue in cloud messaging service, or QPS from loadbalancer + running outside of cluster). + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + object: + description: object refers to a metric describing a + single kubernetes object (for example, hits-per-second + on an Ingress object). + properties: + describedObject: + description: describedObject specifies the descriptions + of a object,such as kind,name apiVersion + properties: + apiVersion: + description: API version of the referent + type: string + kind: + description: 'Kind of the referent; More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' + type: string + name: + description: 'Name of the referent; More info: + http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + required: + - kind + - name + type: object + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. 
+ items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - describedObject + - metric + - target + type: object + pods: + description: pods refers to a metric describing each + pod in the current scale target (for example, transactions-processed-per-second). The + values will be averaged together before being compared + to the target value. + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. 
+ properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + resource: + description: resource refers to a resource metric (such + as those specified in requests and limits) known to + Kubernetes describing each pod in the current scale + target (e.g. CPU or memory). Such metrics are built + in to Kubernetes, and have special scaling options + on top of those available to normal per-pod metrics + using the "pods" source. + properties: + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. 
Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - name + - target + type: object + type: + description: 'type is the type of metric source. It + should be one of "ContainerResource", "External", + "Object", "Pods" or "Resource", each mapping to a + matching field in the object. Note: "ContainerResource" + type is available on when the feature-gate HPAContainerMetrics + is enabled' + type: string + required: + - type + type: object + type: array + minReplicas: + description: '`minReplicas` is the lower limit for the number + of replicas to which the autoscaler can scale down. It defaults + to 1 pod. minReplicas is allowed to be 0 if the alpha feature + gate HPAScaleToZero is enabled and at least one Object or + External metric is configured. Scaling is active as long + as at least one metric value is available.' + format: int32 + type: integer + status: + default: DISABLED + description: '`status` describes the desired status regarding + deploying an horizontal pod autoscaler.
- `DISABLED` + will not deploy a horizontal pod autoscaler.<br>
- `ENABLED` + will deploy a horizontal pod autoscaler.<br>
' + enum: + - DISABLED + - ENABLED + type: string + type: object + kafkaConsumerBatchSize: + default: 10485760 + description: '`kafkaConsumerBatchSize` indicates to the broker + the maximum batch size, in bytes, that the consumer will accept. + Ignored when not using Kafka. Default: 10MB.' + type: integer + kafkaConsumerQueueCapacity: + default: 1000 + description: '`kafkaConsumerQueueCapacity` defines the capacity + of the internal message queue used in the Kafka consumer client. + Ignored when not using Kafka.' + type: integer + kafkaConsumerReplicas: + default: 3 + description: '`kafkaConsumerReplicas` defines the number of replicas + (pods) to start for `flowlogs-pipeline-transformer`, which consumes + Kafka messages. This setting is ignored when Kafka is disabled.' + format: int32 + minimum: 0 + type: integer + logLevel: + default: info + description: '`logLevel` of the processor runtime' + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + logTypes: + default: FLOWS + description: '`logTypes` defines the desired record types to generate. + Possible values are:
- `FLOWS` (default) to export regular + network flows<br>
- `CONVERSATIONS` to generate events for started + conversations, ended conversations as well as periodic "tick" + updates<br>
- `ENDED_CONVERSATIONS` to generate only ended conversations + events<br>
- `ALL` to generate both network flows and all conversations + events<br>
' + enum: + - FLOWS + - CONVERSATIONS + - ENDED_CONVERSATIONS + - ALL + type: string + metrics: + description: '`Metrics` define the processor configuration regarding + metrics' + properties: + disableAlerts: + description: '`disableAlerts` is a list of alerts that should + be disabled. Possible values are:
`NetObservNoFlows`, + which is triggered when no flows are being observed for + a certain period.<br>
`NetObservLokiError`, which is triggered + when flows are being dropped due to Loki errors.<br>
' + items: + description: Name of a processor alert. Possible values + are:<br>
- `NetObservNoFlows`, which is triggered when + no flows are being observed for a certain period.<br>
+ - `NetObservLokiError`, which is triggered when flows + are being dropped due to Loki errors.<br>
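+ # Illustration only, not part of the generated schema: for example, to
+ # silence only the Loki alert while keeping the no-flows alert:
+ #   metrics:
+ #     disableAlerts: ["NetObservLokiError"]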
+ enum: + - NetObservNoFlows + - NetObservLokiError + type: string + type: array + ignoreTags: + default: + - egress + - packets + description: '`ignoreTags` is a list of tags to specify which + metrics to ignore. Each metric is associated with a list + of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions + . Available tags are: `egress`, `ingress`, `flows`, `bytes`, + `packets`, `namespaces`, `nodes`, `workloads`.' + items: + type: string + type: array + server: + description: Metrics server endpoint configuration for Prometheus + scraper + properties: + port: + default: 9102 + description: The prometheus HTTP port + format: int32 + maximum: 65535 + minimum: 1 + type: integer + tls: + description: TLS configuration. + properties: + provided: + description: TLS configuration when `type` is set + to `PROVIDED`. + properties: + certFile: + description: '`certFile` defines the path to the + certificate file name within the config map + or secret' + type: string + certKey: + description: '`certKey` defines the path to the + certificate private key file name within the + config map or secret. Omit when the key is not + necessary.' + type: string + name: + description: Name of the config map or secret + containing certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, assumes + the same namespace as where NetObserv is deployed. + If the namespace is different, the config map + or the secret will be copied so that it can + be mounted as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + type: + default: DISABLED + description: Select the type of TLS configuration:
+ - `DISABLED` (default) to not configure TLS for + the endpoint. - `PROVIDED` to manually provide cert + file and a key file. - `AUTO` to use OpenShift auto + generated certificate using annotations. + enum: + - DISABLED + - PROVIDED + - AUTO + type: string + type: object + type: object + type: object + port: + default: 2055 + description: Port of the flow collector (host port). By convention, + some values are forbidden. It must be greater than 1024 and + different from 4500, 4789 and 6081. + format: int32 + maximum: 65535 + minimum: 1025 + type: integer + profilePort: + description: '`profilePort` allows setting up a Go pprof profiler + listening to this port' + format: int32 + maximum: 65535 + minimum: 0 + type: integer + resources: + default: + limits: + memory: 800Mi + requests: + cpu: 100m + memory: 100Mi + description: '`resources` are the compute resources required by + this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + type: object + status: + description: '`FlowCollectorStatus` defines the observed state of FlowCollector' + properties: + conditions: + description: '`conditions` represent the latest available observations + of an object''s state' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: Namespace where console plugin and flowlogs-pipeline + have been deployed. + type: string + required: + - conditions + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/samples/flows_v1beta2_flowcollector.yaml b/config/samples/flows_v1beta2_flowcollector.yaml new file mode 100644 index 000000000..da05e4626 --- /dev/null +++ b/config/samples/flows_v1beta2_flowcollector.yaml @@ -0,0 +1,147 @@ +apiVersion: flows.netobserv.io/v1beta2 +kind: FlowCollector +metadata: + name: cluster +spec: + namespace: netobserv + deploymentModel: DIRECT + agent: + type: EBPF + ebpf: + imagePullPolicy: IfNotPresent + sampling: 50 + cacheActiveTimeout: 5s + cacheMaxFlows: 100000 + interfaces: [ ] + excludeInterfaces: [ "lo" ] + logLevel: info + resources: + requests: + memory: 50Mi + cpu: 100m + limits: + memory: 800Mi + kafkaBatchSize: 10485760 + processor: + port: 2055 + imagePullPolicy: IfNotPresent + logLevel: info + profilePort: 6060 + metrics: + server: + port: 9102 + ignoreTags: + - egress + - packets + disableAlerts: [] + dropUnusedFields: true + resources: + requests: + memory: 100Mi + cpu: 100m + limits: + memory: 800Mi + kafkaConsumerReplicas: 3 + kafkaConsumerAutoscaler: null + kafkaConsumerQueueCapacity: 1000 + kafkaConsumerBatchSize: 10485760 + logTypes: FLOWS + conversationTerminatingTimeout: 5s + conversationHeartbeatInterval: 30s + conversationEndTimeout: 10s + kafka: + address: "kafka-cluster-kafka-bootstrap.netobserv" + topic: network-flows + tls: + enable: false + caCert: + type: secret + name: kafka-cluster-cluster-ca-cert + certFile: ca.crt + userCert: + type: secret + name: flp-kafka + certFile: user.crt + certKey: user.key + loki: + mode: MANUAL + manual: + ingesterUrl: 'http://loki.netobserv.svc:3100/' + # Uncomment lines below for typical installation with loki-operator (5.6+ needed) + # and ensure tls and statusTls are 
enabled + # ingesterUrl: 'https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network/' + # statusUrl: 'https://loki-query-frontend-http.netobserv.svc:3100/' + # authToken: FORWARD + tls: + enable: false + caCert: + type: configmap + name: loki-gateway-ca-bundle + certFile: service-ca.crt + insecureSkipVerify: false + statusTls: + enable: false + caCert: + certFile: service-ca.crt + name: loki-ca-bundle + type: configmap + insecureSkipVerify: false + userCert: + certFile: tls.crt + certKey: tls.key + name: loki-query-frontend-http + type: secret + batchWait: 1s + batchSize: 10485760 + minBackoff: 1s + maxBackoff: 5s + maxRetries: 2 + consolePlugin: + register: true + imagePullPolicy: IfNotPresent + port: 9001 + logLevel: info + autoscaler: + status: DISABLED + minReplicas: 1 + maxReplicas: 3 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 + portNaming: + enable: true + portNames: + "3100": loki + quickFilters: + - name: Applications + filter: + src_namespace!: 'openshift-,netobserv' + dst_namespace!: 'openshift-,netobserv' + default: true + - name: Infrastructure + filter: + src_namespace: 'openshift-,netobserv' + dst_namespace: 'openshift-,netobserv' + - name: Pods network + filter: + src_kind: 'Pod' + dst_kind: 'Pod' + default: true + - name: Services network + filter: + dst_kind: 'Service' + exporters: [] + # - type: KAFKA + # kafka: + # address: "kafka-cluster-kafka-bootstrap.netobserv" + # topic: netobserv-flows-export + # or + # - type: IPFIX + # ipfix: + # targetHost: "ipfix-collector.ipfix.svc.cluster.local" + # targetPort: 4739 + # transport: TCP or UDP (optional - defaults to TCP) \ No newline at end of file diff --git a/config/samples/flows_v1beta2_flowcollector_lokistack.yaml b/config/samples/flows_v1beta2_flowcollector_lokistack.yaml new file mode 100644 index 000000000..c4b05e2ac --- /dev/null +++ b/config/samples/flows_v1beta2_flowcollector_lokistack.yaml @@ -0,0 +1,168 @@ +apiVersion: flows.netobserv.io/v1beta2 +kind: FlowCollector +metadata: + name: cluster +spec: + namespace: netobserv + deploymentModel: DIRECT + agent: + type: EBPF + ebpf: + imagePullPolicy: IfNotPresent + sampling: 50 + cacheActiveTimeout: 5s + cacheMaxFlows: 100000 + interfaces: [ ] + excludeInterfaces: [ "lo" ] + logLevel: info + resources: + requests: + memory: 50Mi + cpu: 100m + limits: + memory: 800Mi + kafkaBatchSize: 10485760 + processor: + port: 2055 + imagePullPolicy: IfNotPresent + logLevel: info + profilePort: 6060 + metrics: + server: + port: 9102 + ignoreTags: + - egress + - packets + disableAlerts: [] + dropUnusedFields: true + resources: + requests: + memory: 100Mi + cpu: 100m + limits: + memory: 800Mi + kafkaConsumerReplicas: 3 + kafkaConsumerAutoscaler: null + kafkaConsumerQueueCapacity: 1000 + kafkaConsumerBatchSize: 10485760 + logTypes: FLOWS + conversationTerminatingTimeout: 5s + conversationHeartbeatInterval: 30s + conversationEndTimeout: 10s + kafka: + address: "kafka-cluster-kafka-bootstrap.netobserv" + topic: network-flows + tls: + enable: false + caCert: + type: secret + name: kafka-cluster-cluster-ca-cert + certFile: ca.crt + userCert: + type: secret + name: flp-kafka + certFile: user.crt + certKey: user.key + loki: + mode: LOKISTACK + lokiStack: + name: loki + namespace: netobserv + batchWait: 1s + batchSize: 10485760 + minBackoff: 1s + maxBackoff: 5s + maxRetries: 2 + consolePlugin: + register: true + imagePullPolicy: IfNotPresent + port: 9001 + logLevel: info + autoscaler: + status: DISABLED + minReplicas: 1 + 
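+ # Note: the autoscaler bounds and metrics in this sample only take
+ # effect when status is set to ENABLED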
maxReplicas: 3 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 + portNaming: + enable: true + portNames: + "3100": loki + quickFilters: + - name: Applications + filter: + src_namespace!: 'openshift-,netobserv' + dst_namespace!: 'openshift-,netobserv' + default: true + - name: Infrastructure + filter: + src_namespace: 'openshift-,netobserv' + dst_namespace: 'openshift-,netobserv' + - name: Pods network + filter: + src_kind: 'Pod' + dst_kind: 'Pod' + default: true + - name: Services network + filter: + dst_kind: 'Service' + exporters: [] + # - type: KAFKA + # kafka: + # address: "kafka-cluster-kafka-bootstrap.netobserv" + # topic: netobserv-flows-export + # or + # - type: IPFIX + # ipfix: + # targetHost: "ipfix-collector.ipfix.svc.cluster.local" + # targetPort: 4739 + # transport: TCP or UDP (optional - defaults to TCP) +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: netobserv-reader +rules: +- apiGroups: + - 'loki.grafana.com' + resources: + - network + resourceNames: + - logs + verbs: + - 'get' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: netobserv-writer +rules: +- apiGroups: + - 'loki.grafana.com' + resources: + - network + resourceNames: + - logs + verbs: + - 'create' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: netobserv-writer-flp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: netobserv-writer +subjects: +- kind: ServiceAccount + name: flowlogs-pipeline + namespace: netobserv +- kind: ServiceAccount + name: flowlogs-pipeline-transformer + namespace: netobserv diff --git a/controllers/consoleplugin/consoleplugin_objects.go b/controllers/consoleplugin/consoleplugin_objects.go index 207f345cf..e9418b187 100644 --- a/controllers/consoleplugin/consoleplugin_objects.go +++ b/controllers/consoleplugin/consoleplugin_objects.go @@ -17,7 +17,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/pkg/helper" "github.com/netobserv/network-observability-operator/pkg/volumes" @@ -175,7 +175,7 @@ func (b *builder) buildArgs(desired *flowslatest.FlowCollectorSpec) []string { "-key", "/var/serving-cert/tls.key", "-loki", querierURL, "-loki-labels", strings.Join(indexFields, ","), - "-loki-tenant-id", desired.Loki.TenantID, + "-loki-tenant-id", desired.Loki.Manual.TenantID, "-loglevel", desired.ConsolePlugin.LogLevel, "-frontend-config", filepath.Join(configPath, configFile), } @@ -188,11 +188,11 @@ func (b *builder) buildArgs(desired *flowslatest.FlowCollectorSpec) []string { args = append(args, "-loki-status", statusURL) } - if desired.Loki.TLS.Enable { - if desired.Loki.TLS.InsecureSkipVerify { + if desired.Loki.Manual.TLS.Enable { + if desired.Loki.Manual.TLS.InsecureSkipVerify { args = append(args, "-loki-skip-tls") } else { - caPath := b.volumes.AddCACertificate(&desired.Loki.TLS, "loki-certs") + caPath := b.volumes.AddCACertificate(&desired.Loki.Manual.TLS, "loki-certs") if caPath != "" { args = append(args, "-loki-ca-path", caPath) } diff --git a/controllers/consoleplugin/consoleplugin_reconciler.go b/controllers/consoleplugin/consoleplugin_reconciler.go 
index fa84e95dc..2a9a235c9 100644 --- a/controllers/consoleplugin/consoleplugin_reconciler.go +++ b/controllers/consoleplugin/consoleplugin_reconciler.go @@ -14,7 +14,7 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" @@ -109,13 +109,12 @@ func (r *CPReconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowC if err = r.reconcileHPA(ctx, &builder, &desired.Spec); err != nil { return err } - // Watch for Loki certificates if necessary; we'll ignore in that case the returned digest, as we don't need to restart pods on cert rotation // because certificate is always reloaded from file - if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.TLS, r.Namespace); err != nil { + if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.Manual.TLS, r.Namespace); err != nil { return err } - if _, _, err = r.Watcher.ProcessMTLSCerts(ctx, r.Client, &desired.Spec.Loki.StatusTLS, r.Namespace); err != nil { + if _, _, err = r.Watcher.ProcessMTLSCerts(ctx, r.Client, &desired.Spec.Loki.Manual.StatusTLS, r.Namespace); err != nil { return err } } else { @@ -267,15 +266,15 @@ func pluginNeedsUpdate(plg *osv1alpha1.ConsolePlugin, desired *pluginSpec) bool } func querierURL(loki *flowslatest.FlowCollectorLoki) string { - if loki.QuerierURL != "" { - return loki.QuerierURL + if loki.Manual.QuerierURL != "" { + return loki.Manual.QuerierURL } - return loki.URL + return loki.Manual.IngesterURL } func statusURL(loki *flowslatest.FlowCollectorLoki) string { - if loki.StatusURL != "" { - return loki.StatusURL + if loki.Manual.StatusURL != "" { + return loki.Manual.StatusURL } return querierURL(loki) } diff --git a/controllers/consoleplugin/consoleplugin_test.go b/controllers/consoleplugin/consoleplugin_test.go index 73e9d40b3..555682d3e 100644 --- a/controllers/consoleplugin/consoleplugin_test.go +++ b/controllers/consoleplugin/consoleplugin_test.go @@ -12,7 +12,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/pkg/helper" ) @@ -108,7 +108,7 @@ func TestContainerUpdateCheck(t *testing.T) { //equals specs plugin := getPluginConfig() - loki := flowslatest.FlowCollectorLoki{URL: "http://loki:3100/", TenantID: "netobserv"} + loki := flowslatest.FlowCollectorLoki{Manual: flowslatest.LokiManualParams{IngesterURL: "http://loki:3100/", TenantID: "netobserv"}} spec := flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} builder := newBuilder(testNamespace, testImage, &spec) old := builder.deployment("digest") @@ -153,14 +153,14 @@ func TestContainerUpdateCheck(t *testing.T) { old = nEw //new loki config - loki = flowslatest.FlowCollectorLoki{URL: "http://loki:3100/", TenantID: "netobserv", TLS: flowslatest.ClientTLS{ + loki = flowslatest.FlowCollectorLoki{Manual: flowslatest.LokiManualParams{IngesterURL: "http://loki:3100/", TenantID: "netobserv", TLS: 
flowslatest.ClientTLS{ Enable: true, CACert: flowslatest.CertificateReference{ Type: "configmap", Name: "cm-name", CertFile: "ca.crt", }, - }} + }}} spec = flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} builder = newBuilder(testNamespace, testImage, &spec) nEw = builder.deployment("digest") @@ -170,7 +170,7 @@ func TestContainerUpdateCheck(t *testing.T) { old = nEw //new loki cert name - loki.TLS.CACert.Name = "cm-name-2" + loki.Manual.TLS.CACert.Name = "cm-name-2" spec = flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} builder = newBuilder(testNamespace, testImage, &spec) nEw = builder.deployment("digest") @@ -180,7 +180,7 @@ func TestContainerUpdateCheck(t *testing.T) { old = nEw //test again no change - loki.TLS.CACert.Name = "cm-name-2" + loki.Manual.TLS.CACert.Name = "cm-name-2" spec = flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} builder = newBuilder(testNamespace, testImage, &spec) nEw = builder.deployment("digest") @@ -190,8 +190,8 @@ func TestContainerUpdateCheck(t *testing.T) { old = nEw //set status url and enable default tls - loki.StatusURL = "http://loki.status:3100/" - loki.StatusTLS.Enable = true + loki.Manual.StatusURL = "http://loki.status:3100/" + loki.Manual.StatusTLS.Enable = true spec = flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} builder = newBuilder(testNamespace, testImage, &spec) @@ -202,7 +202,7 @@ func TestContainerUpdateCheck(t *testing.T) { old = nEw //update status ca cert - loki.StatusTLS.CACert = flowslatest.CertificateReference{ + loki.Manual.StatusTLS.CACert = flowslatest.CertificateReference{ Type: "configmap", Name: "status-cm-name", CertFile: "status-ca.crt", @@ -217,7 +217,7 @@ func TestContainerUpdateCheck(t *testing.T) { old = nEw //update status user cert - loki.StatusTLS.UserCert = flowslatest.CertificateReference{ + loki.Manual.StatusTLS.UserCert = flowslatest.CertificateReference{ Type: "secret", Name: "sec-name", CertFile: "tls.crt", @@ -262,7 +262,7 @@ func TestBuiltService(t *testing.T) { //newly created service should not need update plugin := getPluginConfig() - loki := flowslatest.FlowCollectorLoki{URL: "http://foo:1234"} + loki := flowslatest.FlowCollectorLoki{Manual: flowslatest.LokiManualParams{IngesterURL: "http://foo:1234"}} spec := flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} builder := newBuilder(testNamespace, testImage, &spec) old := builder.mainService() @@ -276,7 +276,7 @@ func TestLabels(t *testing.T) { assert := assert.New(t) plugin := getPluginConfig() - loki := flowslatest.FlowCollectorLoki{URL: "http://foo:1234"} + loki := flowslatest.FlowCollectorLoki{Manual: flowslatest.LokiManualParams{IngesterURL: "http://foo:1234"}} spec := flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} builder := newBuilder(testNamespace, testImage, &spec) diff --git a/controllers/ebpf/agent_controller.go b/controllers/ebpf/agent_controller.go index 4a9497af0..81bab68bf 100644 --- a/controllers/ebpf/agent_controller.go +++ b/controllers/ebpf/agent_controller.go @@ -6,7 +6,7 @@ import ( "strconv" "strings" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/ebpf/internal/permissions" "github.com/netobserv/network-observability-operator/controllers/operator" diff --git 
a/controllers/ebpf/internal/permissions/permissions.go b/controllers/ebpf/internal/permissions/permissions.go index a7705f32f..f9903ccd0 100644 --- a/controllers/ebpf/internal/permissions/permissions.go +++ b/controllers/ebpf/internal/permissions/permissions.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" diff --git a/controllers/flowcollector_controller.go b/controllers/flowcollector_controller.go index 64681ec75..4ab8721b8 100644 --- a/controllers/flowcollector_controller.go +++ b/controllers/flowcollector_controller.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/consoleplugin" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/ebpf" diff --git a/controllers/flowcollector_controller_certificates_test.go b/controllers/flowcollector_controller_certificates_test.go index 376c003b8..c068a7dbf 100644 --- a/controllers/flowcollector_controller_certificates_test.go +++ b/controllers/flowcollector_controller_certificates_test.go @@ -10,7 +10,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" . "github.com/netobserv/network-observability-operator/controllers/controllerstest" "github.com/netobserv/network-observability-operator/controllers/flowlogspipeline" @@ -144,18 +144,19 @@ func flowCollectorCertificatesSpecs() { Type: "EBPF", }, Loki: flowslatest.FlowCollectorLoki{ - Enable: pointer.Bool(true), - AuthToken: flowslatest.LokiAuthForwardUserToken, - TLS: flowslatest.ClientTLS{ - Enable: true, - CACert: flowslatest.CertificateReference{ - Type: flowslatest.RefTypeConfigMap, - Name: lokiCert.Name, - Namespace: lokiCert.Namespace, - CertFile: "cert.crt", + Enable: pointer.Bool(true), + Manual: flowslatest.LokiManualParams{ + AuthToken: flowslatest.LokiAuthForwardUserToken, + TLS: flowslatest.ClientTLS{ + Enable: true, + CACert: flowslatest.CertificateReference{ + Type: flowslatest.RefTypeConfigMap, + Name: lokiCert.Name, + Namespace: lokiCert.Namespace, + CertFile: "cert.crt", + }, }, - }, - }, + }}, Kafka: flowslatest.FlowCollectorKafka{ TLS: flowslatest.ClientTLS{ Enable: true, diff --git a/controllers/flowcollector_controller_console_test.go b/controllers/flowcollector_controller_console_test.go index 143c03e01..dc9cca5a9 100644 --- a/controllers/flowcollector_controller_console_test.go +++ b/controllers/flowcollector_controller_console_test.go @@ -15,7 +15,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" . 
"github.com/netobserv/network-observability-operator/controllers/controllerstest" ) @@ -215,14 +215,14 @@ func flowCollectorConsolePluginSpecs() { }) It("Should update the Loki URL in the Console Plugin if it changes in the Spec", func() { UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { - fc.Spec.Loki.URL = "http://loki.namespace:8888" + fc.Spec.Loki.Manual.IngesterURL = "http://loki.namespace:8888" }) Eventually(getContainerArgumentAfter("netobserv-plugin", "-loki", cpKey), timeout, interval).Should(Equal("http://loki.namespace:8888")) }) It("Should use the Loki Querier URL instead of the Loki URL, if the first is defined", func() { UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { - fc.Spec.Loki.QuerierURL = "http://loki-querier:6789" + fc.Spec.Loki.Manual.QuerierURL = "http://loki-querier:6789" }) Eventually(getContainerArgumentAfter("netobserv-plugin", "-loki", cpKey), timeout, interval).Should(Equal("http://loki-querier:6789")) diff --git a/controllers/flowcollector_controller_ebpf_test.go b/controllers/flowcollector_controller_ebpf_test.go index 6f4b302a7..74fa35349 100644 --- a/controllers/flowcollector_controller_ebpf_test.go +++ b/controllers/flowcollector_controller_ebpf_test.go @@ -12,7 +12,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" . "github.com/netobserv/network-observability-operator/controllers/controllerstest" "github.com/netobserv/network-observability-operator/pkg/helper" diff --git a/controllers/flowcollector_controller_iso_test.go b/controllers/flowcollector_controller_iso_test.go index 83eb7bd3d..3033dee78 100644 --- a/controllers/flowcollector_controller_iso_test.go +++ b/controllers/flowcollector_controller_iso_test.go @@ -11,7 +11,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" ) // nolint:cyclop @@ -116,20 +116,12 @@ func flowCollectorIsoSpecs() { }, QuickFilters: []flowslatest.QuickFilter{}, }, - Loki: flowslatest.FlowCollectorLoki{ - Enable: pointer.Bool(true), - URL: "http://loki", - QuerierURL: "", - StatusURL: "", - TenantID: "test", - AuthToken: "DISABLED", - BatchWait: &metav1.Duration{Duration: time.Second}, - BatchSize: 100, - Timeout: &metav1.Duration{Duration: time.Second}, - MinBackoff: &metav1.Duration{Duration: time.Second}, - MaxBackoff: &metav1.Duration{Duration: time.Second}, - MaxRetries: &zero, - StaticLabels: map[string]string{}, + Loki: flowslatest.FlowCollectorLoki{Manual: flowslatest.LokiManualParams{ + IngesterURL: "http://loki", + QuerierURL: "", + StatusURL: "", + TenantID: "test", + AuthToken: "DISABLED", TLS: flowslatest.ClientTLS{ Enable: false, InsecureSkipVerify: false, @@ -162,6 +154,15 @@ func flowCollectorIsoSpecs() { CertKey: "", }, }, + }, + Enable: pointer.Bool(true), + BatchWait: &metav1.Duration{Duration: time.Second}, + BatchSize: 100, + Timeout: &metav1.Duration{Duration: time.Second}, + MinBackoff: &metav1.Duration{Duration: time.Second}, + MaxBackoff: &metav1.Duration{Duration: time.Second}, + MaxRetries: &zero, + StaticLabels: map[string]string{}, }, Kafka: flowslatest.FlowCollectorKafka{ Address: "http://kafka", diff --git a/controllers/flowcollector_controller_test.go 
b/controllers/flowcollector_controller_test.go index d10d5f103..01dfd0cb2 100644 --- a/controllers/flowcollector_controller_test.go +++ b/controllers/flowcollector_controller_test.go @@ -17,7 +17,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" . "github.com/netobserv/network-observability-operator/controllers/controllerstest" "github.com/netobserv/network-observability-operator/controllers/flowlogspipeline" @@ -643,7 +643,7 @@ func flowCollectorControllerSpecs() { }, })).Should(Succeed()) UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { - fc.Spec.Loki.TLS = flowslatest.ClientTLS{ + fc.Spec.Loki.Manual.TLS = flowslatest.ClientTLS{ Enable: true, CACert: flowslatest.CertificateReference{ Type: flowslatest.RefTypeConfigMap, @@ -668,7 +668,7 @@ func flowCollectorControllerSpecs() { It("Should restore no TLS config", func() { UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { - fc.Spec.Loki.TLS = flowslatest.ClientTLS{ + fc.Spec.Loki.Manual.TLS = flowslatest.ClientTLS{ Enable: false, } }) diff --git a/controllers/flowlogspipeline/flp_common_objects.go b/controllers/flowlogspipeline/flp_common_objects.go index 13695de68..bbe5ff2df 100644 --- a/controllers/flowlogspipeline/flp_common_objects.go +++ b/controllers/flowlogspipeline/flp_common_objects.go @@ -20,7 +20,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/globals" "github.com/netobserv/network-observability-operator/controllers/reconcilers" @@ -316,10 +316,10 @@ func (b *builder) addTransformStages(stage *config.PipelineBuilderStage) error { MinBackoff: helper.UnstructuredDuration(b.desired.Loki.MinBackoff), StaticLabels: model.LabelSet{}, Timeout: helper.UnstructuredDuration(b.desired.Loki.Timeout), - URL: b.desired.Loki.URL, + URL: helper.LokiIngesterURL(&b.desired.Loki), TimestampLabel: "TimeFlowEndMs", TimestampScale: "1ms", - TenantID: b.desired.Loki.TenantID, + TenantID: helper.LokiTenantID(&b.desired.Loki), } for k, v := range b.desired.Loki.StaticLabels { @@ -327,7 +327,7 @@ func (b *builder) addTransformStages(stage *config.PipelineBuilderStage) error { } var authorization *promConfig.Authorization - if helper.LokiUseHostToken(&b.desired.Loki) || helper.LokiForwardUserToken(&b.desired.Loki) { + if helper.LokiUseHostToken(&b.desired.Loki) || helper.LokiForwardUserToken(&b.desired.Loki) || helper.LokiModeLokiStack(&b.desired.Loki) { b.volumes.AddToken(constants.FLPName) authorization = &promConfig.Authorization{ Type: "Bearer", @@ -335,28 +335,7 @@ func (b *builder) addTransformStages(stage *config.PipelineBuilderStage) error { } } - if b.desired.Loki.TLS.Enable { - if b.desired.Loki.TLS.InsecureSkipVerify { - lokiWrite.ClientConfig = &promConfig.HTTPClientConfig{ - Authorization: authorization, - TLSConfig: promConfig.TLSConfig{ - InsecureSkipVerify: true, - }, - } - } else { - caPath := b.volumes.AddCACertificate(&b.desired.Loki.TLS, "loki-certs") - lokiWrite.ClientConfig = &promConfig.HTTPClientConfig{ - Authorization: 
authorization, - TLSConfig: promConfig.TLSConfig{ - CAFile: caPath, - }, - } - } - } else { - lokiWrite.ClientConfig = &promConfig.HTTPClientConfig{ - Authorization: authorization, - } - } + lokiWrite.ClientConfig = helper.LokiTLSClient(&b.desired.Loki, authorization, &b.volumes) enrichedStage.WriteLoki("loki", lokiWrite) } diff --git a/controllers/flowlogspipeline/flp_ingest_objects.go b/controllers/flowlogspipeline/flp_ingest_objects.go index e4082de21..3b5e6513f 100644 --- a/controllers/flowlogspipeline/flp_ingest_objects.go +++ b/controllers/flowlogspipeline/flp_ingest_objects.go @@ -8,7 +8,7 @@ import ( "github.com/netobserv/flowlogs-pipeline/pkg/api" "github.com/netobserv/flowlogs-pipeline/pkg/config" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" ) diff --git a/controllers/flowlogspipeline/flp_ingest_reconciler.go b/controllers/flowlogspipeline/flp_ingest_reconciler.go index 1fcf87a75..6b5359668 100644 --- a/controllers/flowlogspipeline/flp_ingest_reconciler.go +++ b/controllers/flowlogspipeline/flp_ingest_reconciler.go @@ -10,7 +10,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" diff --git a/controllers/flowlogspipeline/flp_monolith_objects.go b/controllers/flowlogspipeline/flp_monolith_objects.go index 2f6f41827..de69cc549 100644 --- a/controllers/flowlogspipeline/flp_monolith_objects.go +++ b/controllers/flowlogspipeline/flp_monolith_objects.go @@ -8,7 +8,7 @@ import ( "github.com/netobserv/flowlogs-pipeline/pkg/api" "github.com/netobserv/flowlogs-pipeline/pkg/config" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" ) diff --git a/controllers/flowlogspipeline/flp_monolith_reconciler.go b/controllers/flowlogspipeline/flp_monolith_reconciler.go index 99c9aaa52..585f692a0 100644 --- a/controllers/flowlogspipeline/flp_monolith_reconciler.go +++ b/controllers/flowlogspipeline/flp_monolith_reconciler.go @@ -10,7 +10,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" @@ -116,7 +116,7 @@ func (r *flpMonolithReconciler) reconcile(ctx context.Context, desired *flowslat // Watch for Loki certificate if necessary; we'll ignore in that case the returned digest, as we don't need to restart pods on cert rotation // because certificate is always reloaded from file 
- if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.TLS, r.Namespace); err != nil { + if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.Manual.TLS, r.Namespace); err != nil { return err } diff --git a/controllers/flowlogspipeline/flp_reconciler.go b/controllers/flowlogspipeline/flp_reconciler.go index 471b49121..1a97201af 100644 --- a/controllers/flowlogspipeline/flp_reconciler.go +++ b/controllers/flowlogspipeline/flp_reconciler.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" "github.com/netobserv/network-observability-operator/pkg/watchers" diff --git a/controllers/flowlogspipeline/flp_test.go b/controllers/flowlogspipeline/flp_test.go index 368a8d690..43556391d 100644 --- a/controllers/flowlogspipeline/flp_test.go +++ b/controllers/flowlogspipeline/flp_test.go @@ -31,7 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/pointer" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" @@ -98,9 +98,8 @@ func getConfig() flowslatest.FlowCollectorSpec { Duration: conntrackTerminatingTimeout, }, }, - Loki: flowslatest.FlowCollectorLoki{ - Enable: pointer.Bool(true), - URL: "http://loki:3100/", + Loki: flowslatest.FlowCollectorLoki{Manual: flowslatest.LokiManualParams{ + IngesterURL: "http://loki:3100/"}, BatchWait: &metav1.Duration{ Duration: 1, }, @@ -111,6 +110,7 @@ func getConfig() flowslatest.FlowCollectorSpec { MaxBackoff: &metav1.Duration{ Duration: 300, }, + Enable: pointer.Bool(true), MaxRetries: pointer.Int32(10), StaticLabels: map[string]string{"app": "netobserv-flowcollector"}, }, @@ -279,7 +279,7 @@ func TestDaemonSetChanged(t *testing.T) { assert.Contains(report.String(), "no change") // Check Loki config change - cfg.Loki.TLS = flowslatest.ClientTLS{ + cfg.Loki.Manual.TLS = flowslatest.ClientTLS{ Enable: true, CACert: flowslatest.CertificateReference{ Type: "configmap", @@ -297,7 +297,7 @@ func TestDaemonSetChanged(t *testing.T) { assert.Contains(report.String(), "config-digest") // Check volumes change - cfg.Loki.TLS = flowslatest.ClientTLS{ + cfg.Loki.Manual.TLS = flowslatest.ClientTLS{ Enable: true, CACert: flowslatest.CertificateReference{ Type: "configmap", @@ -624,7 +624,7 @@ func TestConfigMapShouldDeserializeAsJSON(t *testing.T) { assert.Equal(cfg.Processor.Port, int32(params[0].Ingest.Collector.Port)) lokiCfg := params[3].Write.Loki - assert.Equal(loki.URL, lokiCfg.URL) + assert.Equal(loki.Manual.IngesterURL, lokiCfg.URL) assert.Equal(loki.BatchWait.Duration.String(), lokiCfg.BatchWait) assert.Equal(loki.MinBackoff.Duration.String(), lokiCfg.MinBackoff) assert.Equal(loki.MaxBackoff.Duration.String(), lokiCfg.MaxBackoff) diff --git a/controllers/flowlogspipeline/flp_transfo_objects.go b/controllers/flowlogspipeline/flp_transfo_objects.go index 6dff458c1..21e034d12 100644 --- a/controllers/flowlogspipeline/flp_transfo_objects.go +++ b/controllers/flowlogspipeline/flp_transfo_objects.go @@ -9,7 +9,7 @@ 
import ( "github.com/netobserv/flowlogs-pipeline/pkg/api" "github.com/netobserv/flowlogs-pipeline/pkg/config" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" ) diff --git a/controllers/flowlogspipeline/flp_transfo_reconciler.go b/controllers/flowlogspipeline/flp_transfo_reconciler.go index a8607c39c..36935d857 100644 --- a/controllers/flowlogspipeline/flp_transfo_reconciler.go +++ b/controllers/flowlogspipeline/flp_transfo_reconciler.go @@ -11,7 +11,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" @@ -116,7 +116,7 @@ func (r *flpTransformerReconciler) reconcile(ctx context.Context, desired *flows // Watch for Loki certificate if necessary; we'll ignore in that case the returned digest, as we don't need to restart pods on cert rotation // because certificate is always reloaded from file - if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.TLS, r.Namespace); err != nil { + if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.Manual.TLS, r.Namespace); err != nil { return err } diff --git a/controllers/ovs/flowsconfig_cno_reconciler.go b/controllers/ovs/flowsconfig_cno_reconciler.go index 4423f8278..1916372d9 100644 --- a/controllers/ovs/flowsconfig_cno_reconciler.go +++ b/controllers/ovs/flowsconfig_cno_reconciler.go @@ -10,7 +10,7 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" ) diff --git a/controllers/ovs/flowsconfig_ovnk_reconciler.go b/controllers/ovs/flowsconfig_ovnk_reconciler.go index f67ff67d0..926aed71c 100644 --- a/controllers/ovs/flowsconfig_ovnk_reconciler.go +++ b/controllers/ovs/flowsconfig_ovnk_reconciler.go @@ -13,7 +13,7 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" ) diff --git a/controllers/ovs/flowsconfig_types.go b/controllers/ovs/flowsconfig_types.go index 5a53ef2fe..b84a20957 100644 --- a/controllers/ovs/flowsconfig_types.go +++ b/controllers/ovs/flowsconfig_types.go @@ -8,7 +8,7 @@ import ( "github.com/mitchellh/mapstructure" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" ) type flowsConfig struct { diff --git a/controllers/suite_test.go 
b/controllers/suite_test.go index e2d52f48b..02b222028 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -44,6 +44,7 @@ import ( flowsv1alpha1 "github.com/netobserv/network-observability-operator/api/v1alpha1" flowsv1beta1 "github.com/netobserv/network-observability-operator/api/v1beta1" + flowsv1beta2 "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/operator" //+kubebuilder:scaffold:imports ) @@ -108,6 +109,9 @@ var _ = BeforeSuite(func() { err = flowsv1beta1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = flowsv1beta2.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = corev1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index ab6193107..533eb032a 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -4,6 +4,7 @@ Packages: - [flows.netobserv.io/v1alpha1](#flowsnetobserviov1alpha1) - [flows.netobserv.io/v1beta1](#flowsnetobserviov1beta1) +- [flows.netobserv.io/v1beta2](#flowsnetobserviov1beta2) # flows.netobserv.io/v1alpha1 @@ -4069,7 +4070,7 @@ Defines the desired state of the FlowCollector resource.

*: the mention loki object - Loki, the flow store, client settings.
+ loki, the flow store, client settings.
false @@ -6524,7 +6525,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka -Loki, the flow store, client settings. +loki, the flow store, client settings. @@ -8515,6 +8516,4437 @@ Condition contains details for one aspect of the current state of this API Resou type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // other fields } +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
lastTransitionTimestring + lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+
+ Format: date-time
+
true
messagestring + message is a human readable message indicating details about the transition. This may be an empty string.
+
true
reasonstring + reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
+
true
statusenum + status of the condition, one of True, False, Unknown.
+
+ Enum: True, False, Unknown
+
true
typestring + type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+
true
observedGenerationinteger + observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
+
+ Format: int64
+ Minimum: 0
+
false
+ +# flows.netobserv.io/v1beta2 + +Resource Types: + +- [FlowCollector](#flowcollector) + + + + +## FlowCollector +[↩ Parent](#flowsnetobserviov1beta2 ) + + + + + + +`FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
apiVersionstringflows.netobserv.io/v1beta2true
kindstringFlowCollectortrue
metadataobjectRefer to the Kubernetes API documentation for the fields of the `metadata` field.true
specobject + Defines the desired state of the FlowCollector resource.

*: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature is not officially supported by Red Hat. It might have been, for instance, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers might provide some support for these features as a best effort only.
+
false
statusobject + `FlowCollectorStatus` defines the observed state of FlowCollector
+
false
+ + +### FlowCollector.spec +[↩ Parent](#flowcollector-1) + + + +Defines the desired state of the FlowCollector resource.

*: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature is not officially supported by Red Hat. It might have been, for instance, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers might provide some support for these features as a best effort only. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
agentobject + Agent configuration for flows extraction.
+
false
consolePluginobject + `consolePlugin` defines the settings related to the OpenShift Console plugin, when available.
+
false
deploymentModelenum + `deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
- `DIRECT` (default) to make the flow processor listen directly to the agents.
 - `KAFKA` to send flows to a Kafka pipeline before consumption by the processor.
Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).
+
+ Enum: DIRECT, KAFKA
+ Default: DIRECT
+
false
exporters[]object + `exporters` define additional optional exporters for custom consumption or storage.
+
false
kafkaobject + Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `KAFKA`.
+
false
lokiobject + loki, the flow store, client settings.
+
false
namespacestring + Namespace where NetObserv pods are deployed. If empty, the namespace of the operator is used.
+
+ Default: netobserv
+
false
processorobject + `processor` defines the settings of the component that receives the flows from the agent, enriches them, generates metrics, and forwards them to the Loki persistence layer and/or any available exporter.
+
false
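A minimal `v1beta2` resource illustrating the fields above might look as follows. This is a sketch only: the `loki.manual` field names (`ingesterURL`, `tenantID`) are inferred from the new `LokiManualParams` Go type and the test fixtures in this patch, and all values are illustrative.

```yaml
apiVersion: flows.netobserv.io/v1beta2
kind: FlowCollector
metadata:
  name: cluster
spec:
  namespace: netobserv          # where NetObserv pods are deployed
  deploymentModel: DIRECT       # agents send flows straight to the processor
  agent:
    type: EBPF
  loki:
    enable: true
    manual:                     # new in v1beta2: manual Loki connection settings
      ingesterURL: http://loki:3100/
      tenantID: netobserv
  consolePlugin:
    enable: true
```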
+ + +### FlowCollector.spec.agent +[↩ Parent](#flowcollectorspec-1) + + + +Agent configuration for flows extraction. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
ebpfobject + `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`.
+
false
ipfixobject + `ipfix` - deprecated (*) - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.
+
false
typeenum + `type` selects the flows tracing agent. Possible values are:
- `EBPF` (default) to use NetObserv eBPF agent.
- `IPFIX` - deprecated (*) - to use the legacy IPFIX collector.
`EBPF` is recommended as it offers better performance and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).
+
+ Enum: EBPF, IPFIX
+ Default: EBPF
+
false
+ + +### FlowCollector.spec.agent.ebpf +[↩ Parent](#flowcollectorspecagent-1) + + + +`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
cacheActiveTimeoutstring + `cacheActiveTimeout` is the max period during which the reporter will aggregate flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load; however, you can expect higher memory consumption and an increased latency in the flow collection.
+
+ Default: 5s
+
false
cacheMaxFlowsinteger + `cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load; however, you can expect higher memory consumption and an increased latency in the flow collection.
+
+ Format: int32
+ Default: 100000
+ Minimum: 1
+
false
debugobject + `debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is intended exclusively for debugging and fine-grained performance optimizations, such as the GOGC and GOMAXPROCS env vars. Users setting its values do so at their own risk.
+
false
enableDNSTrackingboolean + Enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem, hence the eBPF pod has to run as privileged. If the spec.agent.eBPF.privileged parameter is not set, an error is reported.
+
+ Default: false
+
false
enablePktDropboolean + Enable the packet drops logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the spec.agent.eBPF.privileged parameter is not set, an error is reported.
+
+ Default: false
+
false
excludeInterfaces[]string + `excludeInterfaces` contains the interface names that will be excluded from flow tracing. An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression; otherwise it is matched as a case-sensitive string.
+
+ Default: [lo]
+
false
imagePullPolicyenum + `imagePullPolicy` is the Kubernetes pull policy for the image defined above
+
+ Enum: IfNotPresent, Always, Never
+ Default: IfNotPresent
+
false
interfaces[]string + `interfaces` contains the interface names from which flows are collected. If empty, the agent fetches all the interfaces in the system, except the ones listed in `excludeInterfaces`. An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression; otherwise it is matched as a case-sensitive string.
+
false
kafkaBatchSizeinteger + `kafkaBatchSize` limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB.
+
+ Default: 10485760
+
false
logLevelenum + `logLevel` defines the log level for the NetObserv eBPF Agent
+
+ Enum: trace, debug, info, warn, error, fatal, panic
+ Default: info
+
false
privilegedboolean + Privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: in that case, the operator will set granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable its correct operation. If for some reason these capabilities cannot be set, for example because an old kernel version that does not know CAP_BPF is in use, then you can turn on this mode for more global privileges.
+
false
resourcesobject + `resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+
+ Default: map[limits:map[memory:800Mi] requests:map[cpu:100m memory:50Mi]]
+
false
samplinginteger + Sampling rate of the flow reporter. 100 means one flow out of 100 is sent. 0 or 1 means all flows are sampled.
+
+ Format: int32
+ Default: 50
+ Minimum: 0
+
false
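For instance, the eBPF agent section could be tuned as in this sketch; all values are illustrative, taken from the documented defaults, and `privileged` is shown because `enablePktDrop` and `enableDNSTracking` both require it.

```yaml
spec:
  agent:
    type: EBPF
    ebpf:
      sampling: 50                        # one flow out of 50 is reported
      cacheActiveTimeout: 5s              # flush aggregated flows at least every 5s
      cacheMaxFlows: 100000               # ... or when 100k flows are aggregated
      excludeInterfaces: ["lo", "/br-/"]  # literal name, then a regex between slashes
      privileged: true                    # needed to mount the kernel debug filesystem
      enablePktDrop: true
      enableDNSTracking: true
```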
+ + +### FlowCollector.spec.agent.ebpf.debug +[↩ Parent](#flowcollectorspecagentebpf-1) + + + +`debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as GOGC and GOMAXPROCS env vars. Users setting its values do it at their own risk. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
envmap[string]string + `env` allows passing custom environment variables to underlying components. Useful for passing some specific performance-tuning options, such as GOGC and GOMAXPROCS, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debugging or support scenarios.
+
false
+ + +### FlowCollector.spec.agent.ebpf.resources +[↩ Parent](#flowcollectorspecagentebpf-1) + + + +`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
limitsmap[string]int or string + Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+
false
requestsmap[string]int or string + Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+
false
+ + +### FlowCollector.spec.agent.ipfix +[↩ Parent](#flowcollectorspecagent-1) + + + +`ipfix` - deprecated (*) - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
cacheActiveTimeoutstring + `cacheActiveTimeout` is the max period during which the reporter will aggregate flows before sending
+
+ Default: 20s
+
false
cacheMaxFlowsinteger + `cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows
+
+ Format: int32
+ Default: 400
+ Minimum: 0
+
false
clusterNetworkOperatorobject + `clusterNetworkOperator` defines the settings related to the OpenShift Cluster Network Operator, when available.
+
false
forceSampleAllboolean + `forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to true. Use at your own risk. When it is set to true, the value of `sampling` is ignored.
+
+ Default: false
+
false
ovnKubernetesobject + `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.
+
false
samplinginteger + `sampling` is the sampling rate on the reporter. 100 means one flow out of 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of IPFIX.
+
+ Format: int32
+ Default: 400
+ Minimum: 2
+
false
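A sketch of the equivalent, deprecated IPFIX configuration, using the documented defaults (note that `sampling` cannot go below 2 unless `forceSampleAll` is set):

```yaml
spec:
  agent:
    type: IPFIX
    ipfix:
      sampling: 400           # one flow out of 400
      cacheActiveTimeout: 20s
      cacheMaxFlows: 400
      forceSampleAll: false   # do not bypass the sampling floor
```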
+ + +### FlowCollector.spec.agent.ipfix.clusterNetworkOperator +[↩ Parent](#flowcollectorspecagentipfix-1) + + + +`clusterNetworkOperator` defines the settings related to the OpenShift Cluster Network Operator, when available. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namespacestring + Namespace where the config map is going to be deployed.
+
+ Default: openshift-network-operator
+
false
+ + +### FlowCollector.spec.agent.ipfix.ovnKubernetes +[↩ Parent](#flowcollectorspecagentipfix-1) + + + +`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
containerNamestring + `containerName` defines the name of the container to configure for IPFIX.
+
+ Default: ovnkube-node
+
false
daemonSetNamestring + `daemonSetName` defines the name of the DaemonSet controlling the OVN-Kubernetes pods.
+
+ Default: ovnkube-node
+
false
namespacestring + Namespace where OVN-Kubernetes pods are deployed.
+
+ Default: ovn-kubernetes
+
false
+ + +### FlowCollector.spec.consolePlugin +[↩ Parent](#flowcollectorspec-1) + + + +`consolePlugin` defines the settings related to the OpenShift Console plugin, when available. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
autoscalerobject + `autoscaler` spec of a horizontal pod autoscaler to set up for the plugin Deployment.
+
false
enableboolean + Enables the console plugin deployment. `spec.loki.enable` must also be `true`
+
+ Default: true
+
false
imagePullPolicyenum + `imagePullPolicy` is the Kubernetes pull policy for the image defined above
+
+ Enum: IfNotPresent, Always, Never
+ Default: IfNotPresent
+
false
logLevelenum + `logLevel` for the console plugin backend
+
+ Enum: trace, debug, info, warn, error, fatal, panic
+ Default: info
+
false
portinteger + `port` is the plugin service port. Do not use 9002, which is reserved for metrics.
+
+ Format: int32
+ Default: 9001
+ Minimum: 1
+ Maximum: 65535
+
false
portNamingobject + `portNaming` defines the configuration of the port-to-service name translation
+
+ Default: map[enable:true]
+
false
quickFilters[]object + `quickFilters` configures quick filter presets for the Console plugin
+
+ Default: [map[default:true filter:map[dst_namespace!:openshift-,netobserv src_namespace!:openshift-,netobserv] name:Applications] map[filter:map[dst_namespace:openshift-,netobserv src_namespace:openshift-,netobserv] name:Infrastructure] map[default:true filter:map[dst_kind:Pod src_kind:Pod] name:Pods network] map[filter:map[dst_kind:Service] name:Services network]]
+
false
registerboolean + `register`, when set to `true`, automatically registers the provided console plugin with the OpenShift Console operator. When set to `false`, you can still register it manually by editing console.operator.openshift.io/cluster with the following command: `oc patch console.operator.openshift.io cluster --type='json' -p '[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]'`
+
+ Default: true
+
false
replicasinteger + `replicas` defines the number of replicas (pods) to start.
+
+ Format: int32
+ Default: 1
+ Minimum: 0
+
false
resourcesobject + `resources`, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+
+ Default: map[limits:map[memory:100Mi] requests:map[cpu:100m memory:50Mi]]
+
false
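Putting the plugin settings together, a hedged example with illustrative values; the `portNames` entry mirrors the example given in the `portNaming` section later in this document:

```yaml
spec:
  consolePlugin:
    enable: true        # requires spec.loki.enable to be true as well
    register: true      # auto-register with the OpenShift Console operator
    replicas: 1
    port: 9001          # 9002 is reserved for metrics
    logLevel: info
    portNaming:
      enable: true
      portNames:
        "3100": loki    # display "loki" instead of the raw port 3100
```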
+ + +### FlowCollector.spec.consolePlugin.autoscaler +[↩ Parent](#flowcollectorspecconsoleplugin-1) + + + +`autoscaler` spec of a horizontal pod autoscaler to set up for the plugin Deployment. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
maxReplicasinteger + `maxReplicas` is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+
+ Format: int32
+ Default: 3
+
false
metrics[]object + Metrics used by the pod autoscaler
+
false
minReplicasinteger + `minReplicas` is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.
+
+ Format: int32
+
false
statusenum + `status` describes the desired status regarding deploying a horizontal pod autoscaler.
 - `DISABLED` will not deploy a horizontal pod autoscaler.
 - `ENABLED` will deploy a horizontal pod autoscaler.

+
+ Enum: DISABLED, ENABLED
+ Default: DISABLED
+
false
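Since `metrics` follows the standard Kubernetes `autoscaling/v2` MetricSpec (documented field by field in the sections that follow), a typical CPU-based setup might look like this sketch:

```yaml
spec:
  consolePlugin:
    autoscaler:
      status: ENABLED
      minReplicas: 1
      maxReplicas: 3
      metrics:
        - type: Resource
          resource:
            name: cpu
            target:
              type: Utilization
              averageUtilization: 50   # scale out above 50% of requested CPU
```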
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index] +[↩ Parent](#flowcollectorspecconsolepluginautoscaler-1) + + + +MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once). + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
typestring + type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available only when the feature-gate HPAContainerMetrics is enabled
+
true
containerResourceobject + containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
+
false
externalobject + external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
+
false
objectobject + object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
+
false
podsobject + pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
+
false
resourceobject + resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
+
false
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].containerResource +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex-1) + + + +containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
containerstring + container is the name of the container in the pods of the scaling target
+
true
namestring + name is the name of the resource in question.
+
true
targetobject + target specifies the target value for the given metric
+
true
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].containerResource.target +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexcontainerresource-1) + + + +target specifies the target value for the given metric + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
typestring + type represents whether the metric type is Utilization, Value, or AverageValue
+
true
averageUtilizationinteger + averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+
+ Format: int32
+
false
averageValueint or string + averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+
false
valueint or string + value is the target value of the metric (as a quantity).
+
false
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex-1) + + + +external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
metricobject + metric identifies the target metric by name and selector
+
true
targetobject + target specifies the target value for the given metric
+
true
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.metric +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternal-1) + + + +metric identifies the target metric by name and selector + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestring + name is the name of the given metric
+
true
selectorobject + selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+
false
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.metric.selector +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternalmetric-1) + + + +selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]object + matchExpressions is a list of label selector requirements. The requirements are ANDed.
+
false
matchLabelsmap[string]string + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+
false
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.metric.selector.matchExpressions[index] +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternalmetricselector-1) + + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystring + key is the label key that the selector applies to.
+
true
operatorstring + operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+
true
values[]string + values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+
false
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.target +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternal-1) + + + +target specifies the target value for the given metric + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
typestring + type represents whether the metric type is Utilization, Value, or AverageValue
+
true
averageUtilizationinteger + averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+
+ Format: int32
+
false
averageValueint or string + averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+
false
valueint or string + value is the target value of the metric (as a quantity).
+
false
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex-1) + + + +object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object). + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
describedObjectobject + describedObject specifies the description of an object, such as its kind, name, and apiVersion
+
true
metricobject + metric identifies the target metric by name and selector
+
true
targetobject + target specifies the target value for the given metric
+
true
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.describedObject +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobject-1) + + + +describedObject specifies the descriptions of a object,such as kind,name apiVersion + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
kindstring + Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+
true
namestring + Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
+
true
apiVersionstring + API version of the referent
+
false
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.metric +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobject-1) + + + +metric identifies the target metric by name and selector + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestring + name is the name of the given metric
+
true
selectorobject + selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+
false
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.metric.selector +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobjectmetric-1) + + + +selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]object + matchExpressions is a list of label selector requirements. The requirements are ANDed.
+
false
matchLabelsmap[string]string + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+
false
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.metric.selector.matchExpressions[index] +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobjectmetricselector-1) + + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystring + key is the label key that the selector applies to.
+
true
operatorstring + operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+
true
values[]string + values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+
false
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.target +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobject-1) + + + +target specifies the target value for the given metric + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
typestring + type represents whether the metric type is Utilization, Value, or AverageValue
+
true
averageUtilizationinteger + averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+
+ Format: int32
+
false
averageValueint or string + averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+
false
valueint or string + value is the target value of the metric (as a quantity).
+
false
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex-1) + + + +pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
metricobject + metric identifies the target metric by name and selector
+
true
targetobject + target specifies the target value for the given metric
+
true
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.metric +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpods-1) + + + +metric identifies the target metric by name and selector + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestring + name is the name of the given metric
+
true
selectorobject + selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+
false
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.metric.selector +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpodsmetric-1) + + + +selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]object + matchExpressions is a list of label selector requirements. The requirements are ANDed.
+
false
matchLabelsmap[string]string + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+
false
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.metric.selector.matchExpressions[index] +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpodsmetricselector-1) + + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystring + key is the label key that the selector applies to.
+
true
operatorstring + operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+
true
values[]string + values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+
false
+ + +### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.target +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpods-1) + + + +target specifies the target value for the given metric + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
typestring + type represents whether the metric type is Utilization, Value, or AverageValue
+
true
averageUtilizationinteger + averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+
+ Format: int32
+
false
averageValueint or string + averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+
false
valueint or string + value is the target value of the metric (as a quantity).
+
false
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].resource
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex-1)
+
+resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **name** | string | name is the name of the resource in question. | true |
+| **target** | object | target specifies the target value for the given metric | true |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].resource.target
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexresource-1)
+
+target specifies the target value for the given metric
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **type** | string | type represents whether the metric type is Utilization, Value, or AverageValue | true |
+| **averageUtilization** | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type<br/><br/>Format: int32 | false |
+| **averageValue** | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity) | false |
+| **value** | int or string | value is the target value of the metric (as a quantity). | false |
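
Putting the autoscaler tables above together, a console-plugin autoscaler entry might look like the sketch below. This is illustrative only, assuming the `autoscaler` block exposes the same `status`/`minReplicas`/`maxReplicas`/`metrics` fields as `kafkaConsumerAutoscaler`; the CPU threshold and replica counts are arbitrary:

```yaml
consolePlugin:
  autoscaler:
    status: ENABLED        # assumed field, mirroring kafkaConsumerAutoscaler
    minReplicas: 1
    maxReplicas: 3
    metrics:
      - type: Resource     # one of ContainerResource, External, Object, Pods, Resource
        resource:
          name: cpu
          target:
            type: Utilization
            averageUtilization: 50   # arbitrary illustrative threshold
```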
+
+### FlowCollector.spec.consolePlugin.portNaming
+[↩ Parent](#flowcollectorspecconsoleplugin-1)
+
+`portNaming` defines the configuration of the port-to-service name translation
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **enable** | boolean | Enable the console plugin port-to-service name translation<br/><br/>Default: true | false |
+| **portNames** | map[string]string | `portNames` defines additional port names to use in the console, for example, `portNames: {"3100": "loki"}`. | false |
+
+### FlowCollector.spec.consolePlugin.quickFilters[index]
+[↩ Parent](#flowcollectorspecconsoleplugin-1)
+
+`QuickFilter` defines preset configuration for Console's quick filters
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **filter** | map[string]string | `filter` is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a comma-separated string, for example, `filter: {"src_namespace": "namespace1,namespace2"}`. | true |
+| **name** | string | Name of the filter, that will be displayed in Console | true |
+| **default** | boolean | `default` defines whether this filter should be active by default or not | false |
+
+### FlowCollector.spec.consolePlugin.resources
+[↩ Parent](#flowcollectorspecconsoleplugin-1)
+
+`resources`, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **limits** | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
+| **requests** | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
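
For reference, the examples embedded in the `portNaming` and `quickFilters` descriptions combine into a snippet like this (the filter name is hypothetical; the other values are the documented examples, not recommendations):

```yaml
consolePlugin:
  portNaming:
    enable: true
    portNames:
      "3100": loki          # example from the portNames description above
  quickFilters:
    - name: Applications    # hypothetical filter name shown in the Console
      default: true
      filter:
        src_namespace: "namespace1,namespace2"  # comma-separated value list
```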
+
+### FlowCollector.spec.exporters[index]
+[↩ Parent](#flowcollectorspec-1)
+
+`FlowCollectorExporter` defines an additional exporter to send enriched flows to.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **type** | enum | `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported (*).<br/><br/>Enum: KAFKA, IPFIX | true |
+| **ipfix** | object | IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. Unsupported (*). | false |
+| **kafka** | object | Kafka configuration, such as the address and topic, to send enriched flows to. | false |
+
+### FlowCollector.spec.exporters[index].ipfix
+[↩ Parent](#flowcollectorspecexportersindex-1)
+
+IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. Unsupported (*).
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **targetHost** | string | Address of the IPFIX external receiver<br/><br/>Default: | true |
+| **targetPort** | integer | Port for the IPFIX external receiver | true |
+| **transport** | enum | Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`.<br/><br/>Enum: TCP, UDP | false |
+
+### FlowCollector.spec.exporters[index].kafka
+[↩ Parent](#flowcollectorspecexportersindex-1)
+
+Kafka configuration, such as the address and topic, to send enriched flows to.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **address** | string | Address of the Kafka server<br/><br/>Default: | true |
+| **topic** | string | Kafka topic to use. It must exist, NetObserv will not create it.<br/><br/>Default: | true |
+| **sasl** | object | SASL authentication configuration. Unsupported (*) | false |
+| **tls** | object | TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. | false |
+
+### FlowCollector.spec.exporters[index].kafka.sasl
+[↩ Parent](#flowcollectorspecexportersindexkafka-1)
+
+SASL authentication configuration. Unsupported (*)
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **clientIDKey** | string | Key for client ID within the provided `reference` | false |
+| **clientSecretKey** | string | Key for client secret within the provided `reference` | false |
+| **reference** | object | Reference to the secret or config map containing the client ID and secret | false |
+| **type** | enum | Type of SASL authentication to use, or `DISABLED` if SASL is not used<br/><br/>Enum: DISABLED, PLAIN, SCRAM-SHA512<br/>Default: DISABLED | false |
+
+### FlowCollector.spec.exporters[index].kafka.sasl.reference
+[↩ Parent](#flowcollectorspecexportersindexkafkasasl-1)
+
+Reference to the secret or config map containing the client ID and secret
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **name** | string | Name of the config map or secret to reference | false |
+| **namespace** | string | Namespace of the config map or secret. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.<br/><br/>Default: | false |
+| **type** | enum | Type for the reference: "configmap" or "secret"<br/><br/>Enum: configmap, secret | false |
+
+### FlowCollector.spec.exporters[index].kafka.tls
+[↩ Parent](#flowcollectorspecexportersindexkafka-1)
+
+TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **caCert** | object | `caCert` defines the reference of the certificate for the Certificate Authority | false |
+| **enable** | boolean | Enable TLS<br/><br/>Default: false | false |
+| **insecureSkipVerify** | boolean | `insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to true, the `caCert` field is ignored.<br/><br/>Default: false | false |
+| **userCert** | object | `userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS) | false |
+
+### FlowCollector.spec.exporters[index].kafka.tls.caCert
+[↩ Parent](#flowcollectorspecexportersindexkafkatls-1)
+
+`caCert` defines the reference of the certificate for the Certificate Authority
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **certFile** | string | `certFile` defines the path to the certificate file name within the config map or secret | false |
+| **certKey** | string | `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| **name** | string | Name of the config map or secret containing certificates | false |
+| **namespace** | string | Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.<br/><br/>Default: | false |
+| **type** | enum | Type for the certificate reference: `configmap` or `secret`<br/><br/>Enum: configmap, secret | false |
+
+### FlowCollector.spec.exporters[index].kafka.tls.userCert
+[↩ Parent](#flowcollectorspecexportersindexkafkatls-1)
+
+`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **certFile** | string | `certFile` defines the path to the certificate file name within the config map or secret | false |
+| **certKey** | string | `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| **name** | string | Name of the config map or secret containing certificates | false |
+| **namespace** | string | Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.<br/><br/>Default: | false |
+| **type** | enum | Type for the certificate reference: `configmap` or `secret`<br/><br/>Enum: configmap, secret | false |
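
As a rough sketch of a Kafka exporter with one-way TLS, combining the fields documented above (the address, topic, and secret name are placeholders, not values from this patch; 9093 is the usual TLS port the description mentions):

```yaml
exporters:
  - type: KAFKA
    kafka:
      address: "kafka-cluster-kafka-bootstrap.netobserv:9093"  # placeholder broker address
      topic: network-flows-export   # placeholder; must already exist, NetObserv does not create it
      tls:
        enable: true
        caCert:
          type: secret
          name: kafka-cluster-cluster-ca-cert  # placeholder secret name
          certFile: ca.crt
```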
+
+### FlowCollector.spec.kafka
+[↩ Parent](#flowcollectorspec-1)
+
+Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `KAFKA`.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **address** | string | Address of the Kafka server<br/><br/>Default: | true |
+| **topic** | string | Kafka topic to use. It must exist, NetObserv will not create it.<br/><br/>Default: | true |
+| **sasl** | object | SASL authentication configuration. Unsupported (*) | false |
+| **tls** | object | TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. | false |
+
+### FlowCollector.spec.kafka.sasl
+[↩ Parent](#flowcollectorspeckafka-1)
+
+SASL authentication configuration. Unsupported (*)
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **clientIDKey** | string | Key for client ID within the provided `reference` | false |
+| **clientSecretKey** | string | Key for client secret within the provided `reference` | false |
+| **reference** | object | Reference to the secret or config map containing the client ID and secret | false |
+| **type** | enum | Type of SASL authentication to use, or `DISABLED` if SASL is not used<br/><br/>Enum: DISABLED, PLAIN, SCRAM-SHA512<br/>Default: DISABLED | false |
+
+### FlowCollector.spec.kafka.sasl.reference
+[↩ Parent](#flowcollectorspeckafkasasl-1)
+
+Reference to the secret or config map containing the client ID and secret
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **name** | string | Name of the config map or secret to reference | false |
+| **namespace** | string | Namespace of the config map or secret. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.<br/><br/>Default: | false |
+| **type** | enum | Type for the reference: "configmap" or "secret"<br/><br/>Enum: configmap, secret | false |
+
+### FlowCollector.spec.kafka.tls
+[↩ Parent](#flowcollectorspeckafka-1)
+
+TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **caCert** | object | `caCert` defines the reference of the certificate for the Certificate Authority | false |
+| **enable** | boolean | Enable TLS<br/><br/>Default: false | false |
+| **insecureSkipVerify** | boolean | `insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to true, the `caCert` field is ignored.<br/><br/>Default: false | false |
+| **userCert** | object | `userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS) | false |
+
+### FlowCollector.spec.kafka.tls.caCert
+[↩ Parent](#flowcollectorspeckafkatls-1)
+
+`caCert` defines the reference of the certificate for the Certificate Authority
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **certFile** | string | `certFile` defines the path to the certificate file name within the config map or secret | false |
+| **certKey** | string | `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| **name** | string | Name of the config map or secret containing certificates | false |
+| **namespace** | string | Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.<br/><br/>Default: | false |
+| **type** | enum | Type for the certificate reference: `configmap` or `secret`<br/><br/>Enum: configmap, secret | false |
+
+### FlowCollector.spec.kafka.tls.userCert
+[↩ Parent](#flowcollectorspeckafkatls-1)
+
+`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **certFile** | string | `certFile` defines the path to the certificate file name within the config map or secret | false |
+| **certKey** | string | `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| **name** | string | Name of the config map or secret containing certificates | false |
+| **namespace** | string | Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.<br/><br/>Default: | false |
+| **type** | enum | Type for the certificate reference: `configmap` or `secret`<br/><br/>Enum: configmap, secret | false |
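
Unlike the exporter above, `spec.kafka` configures Kafka as the internal broker of the pipeline; per its description it only takes effect with the `KAFKA` deployment model. A minimal sketch with a placeholder broker address:

```yaml
deploymentModel: KAFKA
kafka:
  address: "kafka-cluster-kafka-bootstrap.netobserv:9092"  # placeholder, plain (non-TLS) port
  topic: network-flows   # placeholder; must already exist
```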
+
+### FlowCollector.spec.loki
+[↩ Parent](#flowcollectorspec-1)
+
+loki, the flow store, client settings.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **batchSize** | integer | `batchSize` is the maximum batch size (in bytes) of logs to accumulate before sending.<br/><br/>Format: int64<br/>Default: 102400<br/>Minimum: 1 | false |
+| **batchWait** | string | `batchWait` is the maximum time to wait before sending a batch.<br/><br/>Default: 1s | false |
+| **enable** | boolean | enable storing flows to Loki. It is required for the OpenShift Console plugin installation.<br/><br/>Default: true | false |
+| **lokiStack** | object | Loki configuration for `LOKISTACK` mode. This is useful for an easy Loki Operator configuration. | false |
+| **manual** | object | Loki configuration for `MANUAL` mode. This is the more flexible configuration. It is ignored for other modes. | false |
+| **maxBackoff** | string | `maxBackoff` is the maximum backoff time for client connection between retries.<br/><br/>Default: 5s | false |
+| **maxRetries** | integer | `maxRetries` is the maximum number of retries for client connections.<br/><br/>Format: int32<br/>Default: 2<br/>Minimum: 0 | false |
+| **minBackoff** | string | `minBackoff` is the initial backoff time for client connection between retries.<br/><br/>Default: 1s | false |
+| **mode** | enum | Selects the Loki configuration to use: `MANUAL` or `LOKISTACK`.<br/><br/>Enum: MANUAL, LOKISTACK<br/>Default: LOKISTACK | false |
+| **staticLabels** | map[string]string | `staticLabels` is a map of common labels to set on each flow.<br/><br/>Default: map[app:netobserv-flowcollector] | false |
+| **timeout** | string | `timeout` is the maximum time connection / request limit. A timeout of zero means no timeout.<br/><br/>Default: 10s | false |
+
+### FlowCollector.spec.loki.lokiStack
+[↩ Parent](#flowcollectorspecloki-1)
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **name** | string | Name of the existing LokiStack resource to use.<br/><br/>Default: loki | true |
+| **namespace** | string | Namespace of the LokiStack resource.<br/><br/>Default: netobserv | true |
+
+### FlowCollector.spec.loki.manual
+[↩ Parent](#flowcollectorspecloki-1)
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **authToken** | enum | `authToken` describes the way to get a token to authenticate to Loki.<br/>- `DISABLED` will not send any token with the request.<br/>- `FORWARD` will forward the user token for authorization.<br/>- `HOST` - deprecated (*) - will use the local pod service account to authenticate to Loki.<br/>When using the Loki Operator, this must be set to `FORWARD`.<br/><br/>Enum: DISABLED, HOST, FORWARD<br/>Default: DISABLED | false |
+| **ingesterUrl** | string | `ingesterUrl` is the address of an existing Loki service to push the flows to. When using the Loki Operator, set it to the Loki gateway service with the `network` tenant set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.<br/><br/>Default: http://loki:3100/ | false |
+| **querierUrl** | string | `querierURL` specifies the address of the Loki querier service, in case it is different from the Loki ingester URL. If empty, the URL value will be used (assuming that the Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion and queries use the Loki gateway. | false |
+| **statusTls** | object | TLS client configuration for Loki status URL. | false |
+| **statusUrl** | string | `statusURL` specifies the address of the Loki `/ready`, `/metrics` and `/config` endpoints, in case it is different from the Loki querier URL. If empty, the `querierURL` value will be used. This is useful to show error messages and some context in the frontend. When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. `statusTLS` configuration will be used when `statusUrl` is set. | false |
+| **tenantID** | string | `tenantID` is the Loki `X-Scope-OrgID` that identifies the tenant for each request. When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode.<br/><br/>Default: netobserv | false |
+| **tls** | object | TLS client configuration for Loki URL. | false |
+
+### FlowCollector.spec.loki.manual.statusTls
+[↩ Parent](#flowcollectorspeclokimanual)
+
+TLS client configuration for Loki status URL.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **caCert** | object | `caCert` defines the reference of the certificate for the Certificate Authority | false |
+| **enable** | boolean | Enable TLS<br/><br/>Default: false | false |
+| **insecureSkipVerify** | boolean | `insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to true, the `caCert` field is ignored.<br/><br/>Default: false | false |
+| **userCert** | object | `userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS) | false |
+
+### FlowCollector.spec.loki.manual.statusTls.caCert
+[↩ Parent](#flowcollectorspeclokimanualstatustls)
+
+`caCert` defines the reference of the certificate for the Certificate Authority
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **certFile** | string | `certFile` defines the path to the certificate file name within the config map or secret | false |
+| **certKey** | string | `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| **name** | string | Name of the config map or secret containing certificates | false |
+| **namespace** | string | Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.<br/><br/>Default: | false |
+| **type** | enum | Type for the certificate reference: `configmap` or `secret`<br/><br/>Enum: configmap, secret | false |
+
+### FlowCollector.spec.loki.manual.statusTls.userCert
+[↩ Parent](#flowcollectorspeclokimanualstatustls)
+
+`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **certFile** | string | `certFile` defines the path to the certificate file name within the config map or secret | false |
+| **certKey** | string | `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| **name** | string | Name of the config map or secret containing certificates | false |
+| **namespace** | string | Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.<br/><br/>Default: | false |
+| **type** | enum | Type for the certificate reference: `configmap` or `secret`<br/><br/>Enum: configmap, secret | false |
+
+### FlowCollector.spec.loki.manual.tls
+[↩ Parent](#flowcollectorspeclokimanual)
+
+TLS client configuration for Loki URL.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **caCert** | object | `caCert` defines the reference of the certificate for the Certificate Authority | false |
+| **enable** | boolean | Enable TLS<br/><br/>Default: false | false |
+| **insecureSkipVerify** | boolean | `insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to true, the `caCert` field is ignored.<br/><br/>Default: false | false |
+| **userCert** | object | `userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS) | false |
+
+### FlowCollector.spec.loki.manual.tls.caCert
+[↩ Parent](#flowcollectorspeclokimanualtls)
+
+`caCert` defines the reference of the certificate for the Certificate Authority
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **certFile** | string | `certFile` defines the path to the certificate file name within the config map or secret | false |
+| **certKey** | string | `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| **name** | string | Name of the config map or secret containing certificates | false |
+| **namespace** | string | Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.<br/><br/>Default: | false |
+| **type** | enum | Type for the certificate reference: `configmap` or `secret`<br/><br/>Enum: configmap, secret | false |
+
+### FlowCollector.spec.loki.manual.tls.userCert
+[↩ Parent](#flowcollectorspeclokimanualtls)
+
+`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **certFile** | string | `certFile` defines the path to the certificate file name within the config map or secret | false |
+| **certKey** | string | `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| **name** | string | Name of the config map or secret containing certificates | false |
+| **namespace** | string | Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.<br/><br/>Default: | false |
+| **type** | enum | Type for the certificate reference: `configmap` or `secret`<br/><br/>Enum: configmap, secret | false |
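
The two Loki modes introduced by this patch can be compared side by side. Both snippets below use only the documented defaults shown in the tables above:

```yaml
# LOKISTACK mode: the operator derives URLs, tenant, and CA bundle from the LokiStack resource
loki:
  mode: LOKISTACK
  lokiStack:
    name: loki            # documented default
    namespace: netobserv  # documented default
---
# MANUAL mode: every endpoint is spelled out explicitly
loki:
  mode: MANUAL
  manual:
    ingesterUrl: http://loki:3100/  # documented default
    tenantID: netobserv             # documented default
```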
+
+### FlowCollector.spec.processor
+[↩ Parent](#flowcollectorspec-1)
+
+`processor` defines the settings of the component that receives the flows from the agent, enriches them, generates metrics, and forwards them to the Loki persistence layer and/or any available exporter.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **conversationEndTimeout** | string | `conversationEndTimeout` is the time to wait after a network flow is received, to consider the conversation ended. This delay is ignored when a FIN packet is collected for TCP flows (see `conversationTerminatingTimeout` instead).<br/><br/>Default: 10s | false |
+| **conversationHeartbeatInterval** | string | `conversationHeartbeatInterval` is the time to wait between "tick" events of a conversation<br/><br/>Default: 30s | false |
+| **conversationTerminatingTimeout** | string | `conversationTerminatingTimeout` is the time to wait from detected FIN flag to end a conversation. Only relevant for TCP flows.<br/><br/>Default: 5s | false |
+| **debug** | object | `debug` allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as GOGC and GOMAXPROCS env vars. Users setting its values do it at their own risk. | false |
+| **dropUnusedFields** | boolean | `dropUnusedFields` allows, when set to true, to drop fields that are known to be unused by OVS, to save storage space.<br/><br/>Default: true | false |
+| **enableKubeProbes** | boolean | `enableKubeProbes` is a flag to enable or disable Kubernetes liveness and readiness probes<br/><br/>Default: true | false |
+| **healthPort** | integer | `healthPort` is a collector HTTP port in the Pod that exposes the health check API<br/><br/>Format: int32<br/>Default: 8080<br/>Minimum: 1<br/>Maximum: 65535 | false |
+| **imagePullPolicy** | enum | `imagePullPolicy` is the Kubernetes pull policy for the image defined above<br/><br/>Enum: IfNotPresent, Always, Never<br/>Default: IfNotPresent | false |
+| **kafkaConsumerAutoscaler** | object | `kafkaConsumerAutoscaler` is the spec of a horizontal pod autoscaler to set up for `flowlogs-pipeline-transformer`, which consumes Kafka messages. This setting is ignored when Kafka is disabled. | false |
+| **kafkaConsumerBatchSize** | integer | `kafkaConsumerBatchSize` indicates to the broker the maximum batch size, in bytes, that the consumer will accept. Ignored when not using Kafka. Default: 10MB.<br/><br/>Default: 10485760 | false |
+| **kafkaConsumerQueueCapacity** | integer | `kafkaConsumerQueueCapacity` defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka.<br/><br/>Default: 1000 | false |
+| **kafkaConsumerReplicas** | integer | `kafkaConsumerReplicas` defines the number of replicas (pods) to start for `flowlogs-pipeline-transformer`, which consumes Kafka messages. This setting is ignored when Kafka is disabled.<br/><br/>Format: int32<br/>Default: 3<br/>Minimum: 0 | false |
+| **logLevel** | enum | `logLevel` of the processor runtime<br/><br/>Enum: trace, debug, info, warn, error, fatal, panic<br/>Default: info | false |
+| **logTypes** | enum | `logTypes` defines the desired record types to generate. Possible values are:<br/>- `FLOWS` (default) to export regular network flows<br/>- `CONVERSATIONS` to generate events for started conversations, ended conversations as well as periodic "tick" updates<br/>- `ENDED_CONVERSATIONS` to generate only ended conversations events<br/>- `ALL` to generate both network flows and all conversations events<br/><br/>Enum: FLOWS, CONVERSATIONS, ENDED_CONVERSATIONS, ALL<br/>Default: FLOWS | false |
+| **metrics** | object | `Metrics` define the processor configuration regarding metrics | false |
+| **port** | integer | Port of the flow collector (host port). By convention, some values are forbidden. It must be greater than 1024 and different from 4500, 4789 and 6081.<br/><br/>Format: int32<br/>Default: 2055<br/>Minimum: 1025<br/>Maximum: 65535 | false |
+| **profilePort** | integer | `profilePort` allows setting up a Go pprof profiler listening to this port<br/><br/>Format: int32<br/>Minimum: 0<br/>Maximum: 65535 | false |
+| **resources** | object | `resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/<br/><br/>Default: map[limits:map[memory:800Mi] requests:map[cpu:100m memory:100Mi]] | false |
+
+### FlowCollector.spec.processor.debug
+[↩ Parent](#flowcollectorspecprocessor-1)
+
+`debug` allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as GOGC and GOMAXPROCS env vars. Users setting its values do it at their own risk.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **env** | map[string]string | `env` allows passing custom environment variables to underlying components. Useful for passing some very concrete performance-tuning options, such as GOGC and GOMAXPROCS, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios. | false |
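
As one concrete reading of the conversation-tracking fields above, a processor that records conversation events instead of raw flows could look like this sketch (the timeouts shown are the documented defaults):

```yaml
processor:
  logTypes: CONVERSATIONS            # generate started/ended/tick conversation events
  conversationHeartbeatInterval: 30s # time between "tick" events
  conversationEndTimeout: 10s        # idle time before a conversation is considered ended
  conversationTerminatingTimeout: 5s # applies after a TCP FIN is detected
```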
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler
+[↩ Parent](#flowcollectorspecprocessor-1)
+
+`kafkaConsumerAutoscaler` is the spec of a horizontal pod autoscaler to set up for `flowlogs-pipeline-transformer`, which consumes Kafka messages. This setting is ignored when Kafka is disabled.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **maxReplicas** | integer | `maxReplicas` is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.<br/><br/>Format: int32<br/>Default: 3 | false |
+| **metrics** | []object | Metrics used by the pod autoscaler | false |
+| **minReplicas** | integer | `minReplicas` is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.<br/><br/>Format: int32 | false |
+| **status** | enum | `status` describes the desired status regarding deploying an horizontal pod autoscaler.<br/>- `DISABLED` will not deploy an horizontal pod autoscaler.<br/>- `ENABLED` will deploy an horizontal pod autoscaler.<br/><br/>Enum: DISABLED, ENABLED<br/>Default: DISABLED | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index]
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscaler-1)
+
+MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **type** | string | type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available only when the feature-gate HPAContainerMetrics is enabled | true |
+| **containerResource** | object | containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. | false |
+| **external** | object | external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). | false |
+| **object** | object | object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object). | false |
+| **pods** | object | pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value. | false |
+| **resource** | object | resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].containerResource
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex-1)
+
+containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **container** | string | container is the name of the container in the pods of the scaling target | true |
+| **name** | string | name is the name of the resource in question. | true |
+| **target** | object | target specifies the target value for the given metric | true |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].containerResource.target
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexcontainerresource-1)
+
+target specifies the target value for the given metric
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **type** | string | type represents whether the metric type is Utilization, Value, or AverageValue | true |
+| **averageUtilization** | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type<br/><br/>Format: int32 | false |
+| **averageValue** | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity) | false |
+| **value** | int or string | value is the target value of the metric (as a quantity). | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex-1)
+
+external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **metric** | object | metric identifies the target metric by name and selector | true |
+| **target** | object | target specifies the target value for the given metric | true |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.metric
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternal-1)
+
+metric identifies the target metric by name and selector
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **name** | string | name is the name of the given metric | true |
+| **selector** | object | selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.metric.selector
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternalmetric-1)
+
+selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **matchExpressions** | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
+| **matchLabels** | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.metric.selector.matchExpressions[index]
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternalmetricselector-1)
+
+A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **key** | string | key is the label key that the selector applies to. | true |
+| **operator** | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
+| **values** | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.target
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternal-1)
+
+target specifies the target value for the given metric
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **type** | string | type represents whether the metric type is Utilization, Value, or AverageValue | true |
+| **averageUtilization** | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type<br/><br/>Format: int32 | false |
+| **averageValue** | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity) | false |
+| **value** | int or string | value is the target value of the metric (as a quantity). | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex-1)
+
+object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **describedObject** | object | describedObject specifies the description of the object, such as kind, name, apiVersion | true |
+| **metric** | object | metric identifies the target metric by name and selector | true |
+| **target** | object | target specifies the target value for the given metric | true |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.describedObject
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobject-1)
+
+describedObject specifies the description of the object, such as kind, name, apiVersion
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **kind** | string | Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | true |
+| **name** | string | Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names | true |
+| **apiVersion** | string | API version of the referent | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.metric
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobject-1)
+
+metric identifies the target metric by name and selector
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **name** | string | name is the name of the given metric | true |
+| **selector** | object | selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.metric.selector
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobjectmetric-1)
+
+selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **matchExpressions** | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
+| **matchLabels** | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.metric.selector.matchExpressions[index]
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobjectmetricselector-1)
+
+A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **key** | string | key is the label key that the selector applies to. | true |
+| **operator** | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
+| **values** | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.target
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobject-1)
+
+target specifies the target value for the given metric
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **type** | string | type represents whether the metric type is Utilization, Value, or AverageValue | true |
+| **averageUtilization** | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type<br/><br/>Format: int32 | false |
+| **averageValue** | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity) | false |
+| **value** | int or string | value is the target value of the metric (as a quantity). | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex-1)
+
+pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **metric** | object | metric identifies the target metric by name and selector | true |
+| **target** | object | target specifies the target value for the given metric | true |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.metric
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpods-1)
+
+metric identifies the target metric by name and selector
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **name** | string | name is the name of the given metric | true |
+| **selector** | object | selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.metric.selector
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpodsmetric-1)
+
+selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **matchExpressions** | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
+| **matchLabels** | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.metric.selector.matchExpressions[index]
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpodsmetricselector-1)
+
+A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **key** | string | key is the label key that the selector applies to. | true |
+| **operator** | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
+| **values** | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.target
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpods-1)
+
+target specifies the target value for the given metric
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **type** | string | type represents whether the metric type is Utilization, Value, or AverageValue | true |
+| **averageUtilization** | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type<br/><br/>Format: int32 | false |
+| **averageValue** | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity) | false |
+| **value** | int or string | value is the target value of the metric (as a quantity). | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].resource
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex-1)
+
+resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **name** | string | name is the name of the resource in question. | true |
+| **target** | object | target specifies the target value for the given metric | true |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].resource.target
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexresource-1)
+
+target specifies the target value for the given metric
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **type** | string | type represents whether the metric type is Utilization, Value, or AverageValue | true |
+| **averageUtilization** | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type<br/><br/>Format: int32 | false |
+| **averageValue** | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity) | false |
+| **value** | int or string | value is the target value of the metric (as a quantity). | false |
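
For the transformer HPA specifically, a hedged sketch scaling `flowlogs-pipeline-transformer` on memory utilization (the threshold is arbitrary, not a recommendation):

```yaml
processor:
  kafkaConsumerReplicas: 3   # documented default
  kafkaConsumerAutoscaler:
    status: ENABLED
    minReplicas: 1
    maxReplicas: 3           # documented default
    metrics:
      - type: Resource
        resource:
          name: memory
          target:
            type: Utilization
            averageUtilization: 75   # arbitrary illustrative threshold
```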
+
+### FlowCollector.spec.processor.metrics
+[↩ Parent](#flowcollectorspecprocessor-1)
+
+`Metrics` define the processor configuration regarding metrics
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **disableAlerts** | []enum | `disableAlerts` is a list of alerts that should be disabled. Possible values are:<br/>`NetObservNoFlows`, which is triggered when no flows are being observed for a certain period.<br/>`NetObservLokiError`, which is triggered when flows are being dropped due to Loki errors. | false |
+| **ignoreTags** | []string | `ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`.<br/><br/>Default: [egress packets] | false |
+| **server** | object | Metrics server endpoint configuration for Prometheus scraper | false |
+
+### FlowCollector.spec.processor.metrics.server
+[↩ Parent](#flowcollectorspecprocessormetrics-1)
+
+Metrics server endpoint configuration for Prometheus scraper
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **port** | integer | The prometheus HTTP port<br/><br/>Format: int32<br/>Default: 9102<br/>Minimum: 1<br/>Maximum: 65535 | false |
+| **tls** | object | TLS configuration. | false |
+
+### FlowCollector.spec.processor.metrics.server.tls
+[↩ Parent](#flowcollectorspecprocessormetricsserver-1)
+
+TLS configuration.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **provided** | object | TLS configuration when `type` is set to `PROVIDED`. | false |
+| **type** | enum | Select the type of TLS configuration:<br/>- `DISABLED` (default) to not configure TLS for the endpoint.<br/>- `PROVIDED` to manually provide cert file and a key file.<br/>- `AUTO` to use OpenShift auto generated certificate using annotations.<br/><br/>Enum: DISABLED, PROVIDED, AUTO<br/>Default: DISABLED | false |
+
+### FlowCollector.spec.processor.metrics.server.tls.provided
+[↩ Parent](#flowcollectorspecprocessormetricsservertls-1)
+
+TLS configuration when `type` is set to `PROVIDED`.
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **certFile** | string | `certFile` defines the path to the certificate file name within the config map or secret | false |
+| **certKey** | string | `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| **name** | string | Name of the config map or secret containing certificates | false |
+| **namespace** | string | Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.<br/><br/>Default: | false |
+| **type** | enum | Type for the certificate reference: `configmap` or `secret`<br/><br/>Enum: configmap, secret | false |
+
+### FlowCollector.spec.processor.resources
+[↩ Parent](#flowcollectorspecprocessor-1)
+
+`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+
+| Name | Type | Description | Required |
+| :--- | :--- | :---------- | :------- |
+| **limits** | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
+| **requests** | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
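
And a sketch of the metrics server using OpenShift auto-generated certificates, with a trimmed alert and tag set (values other than the documented defaults are illustrative):

```yaml
processor:
  metrics:
    ignoreTags: [egress, packets]        # documented default
    disableAlerts: [NetObservLokiError]  # illustrative: keep NetObservNoFlows active
    server:
      port: 9102                         # documented default
      tls:
        type: AUTO                       # OpenShift-managed certificate via annotations
```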
+ + +### FlowCollector.status +[↩ Parent](#flowcollector-1) + + + +`FlowCollectorStatus` defines the observed state of FlowCollector + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
conditions[]object + `conditions` represent the latest available observations of an object's state
+
true
namespacestring + Namespace where console plugin and flowlogs-pipeline have been deployed.
+
false
+ + +### FlowCollector.status.conditions[index] +[↩ Parent](#flowcollectorstatus-1) + + + +Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + // other fields } + diff --git a/main.go b/main.go index 26b73685c..b33ee6052 100644 --- a/main.go +++ b/main.go @@ -48,6 +48,7 @@ import ( flowsv1alpha1 "github.com/netobserv/network-observability-operator/api/v1alpha1" flowsv1beta1 "github.com/netobserv/network-observability-operator/api/v1beta1" + flowsv1beta2 "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers" "github.com/netobserv/network-observability-operator/controllers/constants" //+kubebuilder:scaffold:imports @@ -66,6 +67,7 @@ func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(flowsv1alpha1.AddToScheme(scheme)) utilruntime.Must(flowsv1beta1.AddToScheme(scheme)) + utilruntime.Must(flowsv1beta2.AddToScheme(scheme)) utilruntime.Must(corev1.AddToScheme(scheme)) utilruntime.Must(ascv2.AddToScheme(scheme)) utilruntime.Must(osv1alpha1.AddToScheme(scheme)) @@ -140,7 +142,7 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "FlowCollector") os.Exit(1) } - if err = (&flowsv1beta1.FlowCollector{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&flowsv1beta2.FlowCollector{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create v1beta1 webhook", "webhook", "FlowCollector") os.Exit(1) } diff --git a/pkg/helper/comparators.go b/pkg/helper/comparators.go index d958ace0b..6eb28c11f 100644 --- a/pkg/helper/comparators.go +++ b/pkg/helper/comparators.go @@ -10,7 +10,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" ) diff --git a/pkg/helper/flowcollector.go b/pkg/helper/flowcollector.go index 895b38559..b671134c4 100644 --- a/pkg/helper/flowcollector.go +++ b/pkg/helper/flowcollector.go @@ -3,8 +3,10 @@ package helper import ( "strings" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" + "github.com/netobserv/network-observability-operator/pkg/volumes" + promConfig "github.com/prometheus/common/config" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -42,22 +44,22 @@ func HPAEnabled(spec *flowslatest.FlowCollectorHPA) bool { } func LokiNoAuthToken(spec *flowslatest.FlowCollectorLoki) bool { - return spec.AuthToken == flowslatest.LokiAuthDisabled + return spec.Manual.AuthToken == flowslatest.LokiAuthDisabled } func LokiUseHostToken(spec *flowslatest.FlowCollectorLoki) bool { - return spec.AuthToken == flowslatest.LokiAuthUseHostToken + return spec.Manual.AuthToken == 
flowslatest.LokiAuthUseHostToken } func LokiForwardUserToken(spec *flowslatest.FlowCollectorLoki) bool { - return spec.AuthToken == flowslatest.LokiAuthForwardUserToken + return spec.Manual.AuthToken == flowslatest.LokiAuthForwardUserToken } func GetLokiStatusTLS(spec *flowslatest.FlowCollectorLoki) flowslatest.ClientTLS { - if spec.StatusURL != "" { - return spec.StatusTLS + if spec.Manual.StatusURL != "" { + return spec.Manual.StatusTLS } - return spec.TLS + return spec.Manual.TLS } func GetRecordTypes(processor *flowslatest.FlowCollectorFLP) []string { @@ -99,6 +101,91 @@ func UseLoki(spec *flowslatest.FlowCollectorSpec) bool { return spec.Loki.Enable == nil || *spec.Loki.Enable } +func LokiModeLokiStack(spec *flowslatest.FlowCollectorLoki) bool { + return spec.Mode == "LOKISTACK" +} + +func LokiIngesterURL(spec *flowslatest.FlowCollectorLoki) string { + + switch spec.Mode { + case "MANUAL": + { + return spec.Manual.IngesterURL + } + case "LOKISTACK": + { + return "https://" + spec.LokiStack.Name + "-gateway-http.netobserv.svc:8080/api/logs/v1/network/" + } + default: + return "http://loki:3100/" + } +} + +func LokiTenantID(spec *flowslatest.FlowCollectorLoki) string { + switch spec.Mode { + case "MANUAL": + { + return spec.Manual.TenantID + } + case "LOKISTACK": + { + return "network" + } + default: + return "netobserv" + } +} + +func LokiTLSClient(spec *flowslatest.FlowCollectorLoki, authorization *promConfig.Authorization, vol *volumes.Builder) *promConfig.HTTPClientConfig { + + switch spec.Mode { + + case "MANUAL": + { + if spec.Manual.TLS.Enable { + if spec.Manual.TLS.InsecureSkipVerify { + return &promConfig.HTTPClientConfig{ + Authorization: authorization, + TLSConfig: promConfig.TLSConfig{ + InsecureSkipVerify: true, + }, + } + } + caPath := vol.AddCACertificate(&spec.Manual.TLS, "loki-certs") + return &promConfig.HTTPClientConfig{ + Authorization: authorization, + TLSConfig: promConfig.TLSConfig{ + CAFile: caPath, + }, + } + + } + return &promConfig.HTTPClientConfig{ + Authorization: authorization, + } + } + case "LOKISTACK": + { + certRef := flowslatest.CertificateReference{ + Type: flowslatest.RefTypeConfigMap, + Name: spec.LokiStack.Name + "-gateway-ca-bundle", + CertFile: "service-ca.crt", + } + clientTLS := &flowslatest.ClientTLS{ + CACert: certRef, + } + caPath := vol.AddCACertificate(clientTLS, "loki-certs") + return &promConfig.HTTPClientConfig{ + Authorization: authorization, + TLSConfig: promConfig.TLSConfig{ + CAFile: caPath, + }} + } + } + + return nil +} + func UseConsolePlugin(spec *flowslatest.FlowCollectorSpec) bool { return UseLoki(spec) && // nil should fallback to default value, which is "true" diff --git a/pkg/volumes/builder.go b/pkg/volumes/builder.go index 305b1538b..448368914 100644 --- a/pkg/volumes/builder.go +++ b/pkg/volumes/builder.go @@ -6,7 +6,7 @@ import ( corev1 "k8s.io/api/core/v1" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" ) diff --git a/pkg/watchers/object_ref.go b/pkg/watchers/object_ref.go index 1eb4f925f..386f1c4a9 100644 --- a/pkg/watchers/object_ref.go +++ b/pkg/watchers/object_ref.go @@ -1,7 +1,7 @@ package watchers import ( - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" ) type objectRef struct { diff --git a/pkg/watchers/watcher.go 
b/pkg/watchers/watcher.go
index 4b6f31b9d..340260a25 100644
--- a/pkg/watchers/watcher.go
+++ b/pkg/watchers/watcher.go
@@ -13,7 +13,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/log"
 	rec "sigs.k8s.io/controller-runtime/pkg/reconcile"
-	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1"
+	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2"
 	"github.com/netobserv/network-observability-operator/controllers/constants"
 	"github.com/netobserv/network-observability-operator/pkg/helper"
 )
diff --git a/pkg/watchers/watcher_test.go b/pkg/watchers/watcher_test.go
index 8b2474caa..94521ffa3 100644
--- a/pkg/watchers/watcher_test.go
+++ b/pkg/watchers/watcher_test.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"testing"
-	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1"
+	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2"
 	"github.com/netobserv/network-observability-operator/pkg/helper"
 	"github.com/netobserv/network-observability-operator/pkg/test"
 	"github.com/stretchr/testify/assert"

From d06fc258c2e8e2ab551dc76d32a47b9c5e51cfb8 Mon Sep 17 00:00:00 2001
From: Julien Pinsonneau
Date: Thu, 3 Aug 2023 14:23:48 +0200
Subject: [PATCH 02/17] flp & console plugin helpers

---
 api/v1beta2/flowcollector_types.go            |  12 +-
 ...flows_v1beta2_flowcollector_lokistack.yaml |  25 +++
 .../consoleplugin/consoleplugin_objects.go    |  17 +-
 .../consoleplugin/consoleplugin_reconciler.go |  21 +--
 .../flowlogspipeline/flp_common_objects.go    |  26 ++-
 .../flp_monolith_reconciler.go                |   2 +-
 .../flp_transfo_reconciler.go                 |   3 +-
 pkg/helper/flowcollector.go                   | 177 ++++++++++--------
 8 files changed, 173 insertions(+), 110 deletions(-)

diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go
index c8c69caf2..5058b6afd 100644
--- a/api/v1beta2/flowcollector_types.go
+++ b/api/v1beta2/flowcollector_types.go
@@ -545,15 +545,25 @@ type LokiStack struct {
 	Namespace string `json:"namespace"`
 }
 
+const (
+	LokiModeManual    = "MANUAL"
+	LokiModeLokiStack = "LOKISTACK"
+)
+
 // FlowCollectorLoki defines the desired state for FlowCollector's Loki client.
 type FlowCollectorLoki struct {
-	//+kubebuilder:validation:Enum=MANUAL;LOKISTACK
 	//+kubebuilder:default:="LOKISTACK"
 	Mode string `json:"mode,omitempty"`
 
+	// Loki configuration for MANUAL mode. This is the most flexible configuration.
+	// It is ignored for the other modes.
+	// +optional
 	Manual LokiManualParams `json:"manual,omitempty"`
 
+	// Loki configuration for LOKISTACK mode. This is useful for an easy loki-operator configuration.
+	// It is ignored for the other modes.
+	// +optional
 	LokiStack LokiStack `json:"lokiStack,omitempty"`
 
 	//+kubebuilder:default:=true
diff --git a/config/samples/flows_v1beta2_flowcollector_lokistack.yaml b/config/samples/flows_v1beta2_flowcollector_lokistack.yaml
index c4b05e2ac..6c6667e9b 100644
--- a/config/samples/flows_v1beta2_flowcollector_lokistack.yaml
+++ b/config/samples/flows_v1beta2_flowcollector_lokistack.yaml
@@ -123,6 +123,31 @@ spec:
 #       targetPort: 4739
 #       transport: TCP or UDP (optional - defaults to TCP)
 ---
+apiVersion: loki.grafana.com/v1
+kind: LokiStack
+metadata:
+  name: loki
+  namespace: netobserv
+spec:
+  tenants:
+    mode: openshift-network
+  managementState: Managed
+  limits:
+    global:
+      queries:
+        queryTimeout: 3m
+  storage:
+    schemas:
+    - effectiveDate: '2020-10-11'
+      version: v11
+    secret:
+      name: loki-secret
+      type: s3
+  hashRing:
+    type: memberlist
+  size: 1x.extra-small
+  storageClassName: gp3-csi
+---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
diff --git a/controllers/consoleplugin/consoleplugin_objects.go b/controllers/consoleplugin/consoleplugin_objects.go
index e9418b187..7797eaeef 100644
--- a/controllers/consoleplugin/consoleplugin_objects.go
+++ b/controllers/consoleplugin/consoleplugin_objects.go
@@ -161,8 +161,8 @@ func (b *builder) deployment(cmDigest string) *appsv1.Deployment {
 }
 
 func (b *builder) buildArgs(desired *flowslatest.FlowCollectorSpec) []string {
-	querierURL := querierURL(&desired.Loki)
-	statusURL := statusURL(&desired.Loki)
+	querierURL := helper.LokiQuerierURL(&desired.Loki)
+	statusURL := helper.LokiStatusURL(&desired.Loki)
 
 	// check for connection traking to list indexes
 	indexFields := constants.LokiIndexFields
@@ -175,7 +175,7 @@ func (b *builder) buildArgs(desired *flowslatest.FlowCollectorSpec) []string {
 		"-key", "/var/serving-cert/tls.key",
 		"-loki", querierURL,
 		"-loki-labels", strings.Join(indexFields, ","),
-		"-loki-tenant-id", desired.Loki.Manual.TenantID,
+		"-loki-tenant-id", helper.LokiTenantID(&desired.Loki),
 		"-loglevel", desired.ConsolePlugin.LogLevel,
 		"-frontend-config", filepath.Join(configPath, configFile),
 	}
@@ -188,23 +188,24 @@ func (b *builder) buildArgs(desired *flowslatest.FlowCollectorSpec) []string {
 		args = append(args, "-loki-status", statusURL)
 	}
 
-	if desired.Loki.Manual.TLS.Enable {
-		if desired.Loki.Manual.TLS.InsecureSkipVerify {
+	clientTLS := helper.LokiTLS(&desired.Loki)
+	if clientTLS.Enable {
+		if clientTLS.InsecureSkipVerify {
 			args = append(args, "-loki-skip-tls")
 		} else {
-			caPath := b.volumes.AddCACertificate(&desired.Loki.Manual.TLS, "loki-certs")
+			caPath := b.volumes.AddCACertificate(clientTLS, "loki-certs")
 			if caPath != "" {
 				args = append(args, "-loki-ca-path", caPath)
 			}
 		}
 	}
 
-	statusTLS := helper.GetLokiStatusTLS(&desired.Loki)
+	statusTLS := helper.LokiStatusTLS(&desired.Loki)
 	if statusTLS.Enable {
 		if statusTLS.InsecureSkipVerify {
 			args = append(args, "-loki-status-skip-tls")
 		} else {
-			statusCaPath, userCertPath, userKeyPath := b.volumes.AddMutualTLSCertificates(&statusTLS, "loki-status-certs")
+			statusCaPath, userCertPath, userKeyPath := b.volumes.AddMutualTLSCertificates(statusTLS, "loki-status-certs")
 			if statusCaPath != "" {
 				args = append(args, "-loki-status-ca-path", statusCaPath)
 			}
diff --git a/controllers/consoleplugin/consoleplugin_reconciler.go b/controllers/consoleplugin/consoleplugin_reconciler.go
index 2a9a235c9..a2d8cddcd 100644
--- a/controllers/consoleplugin/consoleplugin_reconciler.go
+++ b/controllers/consoleplugin/consoleplugin_reconciler.go
@@ -109,12
+109,15 @@ func (r *CPReconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowC if err = r.reconcileHPA(ctx, &builder, &desired.Spec); err != nil { return err } + // Watch for Loki certificates if necessary; we'll ignore in that case the returned digest, as we don't need to restart pods on cert rotation // because certificate is always reloaded from file - if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.Manual.TLS, r.Namespace); err != nil { + clientTLS := helper.LokiTLS(&desired.Spec.Loki) + if _, err = r.Watcher.ProcessCACert(ctx, r.Client, clientTLS, r.Namespace); err != nil { return err } - if _, _, err = r.Watcher.ProcessMTLSCerts(ctx, r.Client, &desired.Spec.Loki.Manual.StatusTLS, r.Namespace); err != nil { + statusTLS := helper.LokiStatusTLS(&desired.Spec.Loki) + if _, _, err = r.Watcher.ProcessMTLSCerts(ctx, r.Client, statusTLS, r.Namespace); err != nil { return err } } else { @@ -264,17 +267,3 @@ func (r *CPReconciler) reconcileHPA(ctx context.Context, builder *builder, desir func pluginNeedsUpdate(plg *osv1alpha1.ConsolePlugin, desired *pluginSpec) bool { return plg.Spec.Service.Port != desired.Port } - -func querierURL(loki *flowslatest.FlowCollectorLoki) string { - if loki.Manual.QuerierURL != "" { - return loki.Manual.QuerierURL - } - return loki.Manual.IngesterURL -} - -func statusURL(loki *flowslatest.FlowCollectorLoki) string { - if loki.Manual.StatusURL != "" { - return loki.Manual.StatusURL - } - return querierURL(loki) -} diff --git a/controllers/flowlogspipeline/flp_common_objects.go b/controllers/flowlogspipeline/flp_common_objects.go index bbe5ff2df..9fe8b0435 100644 --- a/controllers/flowlogspipeline/flp_common_objects.go +++ b/controllers/flowlogspipeline/flp_common_objects.go @@ -327,7 +327,7 @@ func (b *builder) addTransformStages(stage *config.PipelineBuilderStage) error { } var authorization *promConfig.Authorization - if helper.LokiUseHostToken(&b.desired.Loki) || helper.LokiForwardUserToken(&b.desired.Loki) || helper.LokiModeLokiStack(&b.desired.Loki) { + if helper.LokiUseHostToken(&b.desired.Loki) || helper.LokiForwardUserToken(&b.desired.Loki) { b.volumes.AddToken(constants.FLPName) authorization = &promConfig.Authorization{ Type: "Bearer", @@ -335,7 +335,29 @@ func (b *builder) addTransformStages(stage *config.PipelineBuilderStage) error { } } - lokiWrite.ClientConfig = helper.LokiTLSClient(&b.desired.Loki, authorization, &b.volumes) + clientTLS := helper.LokiTLS(&b.desired.Loki) + if clientTLS.Enable { + if clientTLS.InsecureSkipVerify { + lokiWrite.ClientConfig = &promConfig.HTTPClientConfig{ + Authorization: authorization, + TLSConfig: promConfig.TLSConfig{ + InsecureSkipVerify: true, + }, + } + } else { + caPath := b.volumes.AddCACertificate(clientTLS, "loki-certs") + lokiWrite.ClientConfig = &promConfig.HTTPClientConfig{ + Authorization: authorization, + TLSConfig: promConfig.TLSConfig{ + CAFile: caPath, + }, + } + } + } else { + lokiWrite.ClientConfig = &promConfig.HTTPClientConfig{ + Authorization: authorization, + } + } enrichedStage.WriteLoki("loki", lokiWrite) } diff --git a/controllers/flowlogspipeline/flp_monolith_reconciler.go b/controllers/flowlogspipeline/flp_monolith_reconciler.go index 585f692a0..01b76df97 100644 --- a/controllers/flowlogspipeline/flp_monolith_reconciler.go +++ b/controllers/flowlogspipeline/flp_monolith_reconciler.go @@ -116,7 +116,7 @@ func (r *flpMonolithReconciler) reconcile(ctx context.Context, desired *flowslat // Watch for Loki certificate if necessary; we'll ignore in that case the 
returned digest, as we don't need to restart pods on cert rotation // because certificate is always reloaded from file - if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.Manual.TLS, r.Namespace); err != nil { + if _, err = r.Watcher.ProcessCACert(ctx, r.Client, helper.LokiTLS(&desired.Spec.Loki), r.Namespace); err != nil { return err } diff --git a/controllers/flowlogspipeline/flp_transfo_reconciler.go b/controllers/flowlogspipeline/flp_transfo_reconciler.go index 36935d857..bbf95526e 100644 --- a/controllers/flowlogspipeline/flp_transfo_reconciler.go +++ b/controllers/flowlogspipeline/flp_transfo_reconciler.go @@ -116,7 +116,8 @@ func (r *flpTransformerReconciler) reconcile(ctx context.Context, desired *flows // Watch for Loki certificate if necessary; we'll ignore in that case the returned digest, as we don't need to restart pods on cert rotation // because certificate is always reloaded from file - if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.Manual.TLS, r.Namespace); err != nil { + clientTLS := helper.LokiTLS(&desired.Spec.Loki) + if _, err = r.Watcher.ProcessCACert(ctx, r.Client, clientTLS, r.Namespace); err != nil { return err } diff --git a/pkg/helper/flowcollector.go b/pkg/helper/flowcollector.go index b671134c4..78369f217 100644 --- a/pkg/helper/flowcollector.go +++ b/pkg/helper/flowcollector.go @@ -5,8 +5,6 @@ import ( flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" - "github.com/netobserv/network-observability-operator/pkg/volumes" - promConfig "github.com/prometheus/common/config" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -20,12 +18,15 @@ func GetSampling(spec *flowslatest.FlowCollectorSpec) int { func UseEBPF(spec *flowslatest.FlowCollectorSpec) bool { return spec.Agent.Type == flowslatest.AgentEBPF } + func UseIPFIX(spec *flowslatest.FlowCollectorSpec) bool { return spec.Agent.Type == flowslatest.AgentIPFIX } + func UseKafka(spec *flowslatest.FlowCollectorSpec) bool { return spec.DeploymentModel == flowslatest.DeploymentModelKafka } + func HasKafkaExporter(spec *flowslatest.FlowCollectorSpec) bool { for _, ex := range spec.Exporters { if ex.Type == flowslatest.KafkaExporter { @@ -43,25 +44,6 @@ func HPAEnabled(spec *flowslatest.FlowCollectorHPA) bool { return spec.Status == flowslatest.HPAStatusEnabled } -func LokiNoAuthToken(spec *flowslatest.FlowCollectorLoki) bool { - return spec.Manual.AuthToken == flowslatest.LokiAuthDisabled -} - -func LokiUseHostToken(spec *flowslatest.FlowCollectorLoki) bool { - return spec.Manual.AuthToken == flowslatest.LokiAuthUseHostToken -} - -func LokiForwardUserToken(spec *flowslatest.FlowCollectorLoki) bool { - return spec.Manual.AuthToken == flowslatest.LokiAuthForwardUserToken -} - -func GetLokiStatusTLS(spec *flowslatest.FlowCollectorLoki) flowslatest.ClientTLS { - if spec.Manual.StatusURL != "" { - return spec.Manual.StatusTLS - } - return spec.Manual.TLS -} - func GetRecordTypes(processor *flowslatest.FlowCollectorFLP) []string { outputRecordTypes := []string{constants.FlowLogType} if processor.LogTypes != nil { @@ -101,89 +83,122 @@ func UseLoki(spec *flowslatest.FlowCollectorSpec) bool { return spec.Loki.Enable == nil || *spec.Loki.Enable } -func LokiModeLokiStack(spec *flowslatest.FlowCollectorLoki) bool { - return spec.Mode == "LOKISTACK" +func LokiNoAuthToken(spec *flowslatest.FlowCollectorLoki) bool { + switch spec.Mode { + case flowslatest.LokiModeLokiStack: + return false + 
default: + return spec.Manual.AuthToken == flowslatest.LokiAuthDisabled + } } -func LokiIngesterURL(spec *flowslatest.FlowCollectorLoki) string { - +func LokiUseHostToken(spec *flowslatest.FlowCollectorLoki) bool { switch spec.Mode { - case "MANUAL": - { - return spec.Manual.IngesterURL - } - case "LOKISTACK": - { - return "https://" + spec.LokiStack.Name + "-gateway-http.netobserv.svc:8080/api/logs/v1/network/" - } + case flowslatest.LokiModeLokiStack: + return false default: - return "http://loki:3100/" + return spec.Manual.AuthToken == flowslatest.LokiAuthUseHostToken } } -func LokiTenantID(spec *flowslatest.FlowCollectorLoki) string { +func LokiForwardUserToken(spec *flowslatest.FlowCollectorLoki) bool { switch spec.Mode { - case "MANUAL": - { - return spec.Manual.TenantID - } - case "LOKISTACK": - { - return "network" - } + case flowslatest.LokiModeLokiStack: + return true default: - return "netobserv" + return spec.Manual.AuthToken == flowslatest.LokiAuthForwardUserToken } } -func LokiTLSClient(spec *flowslatest.FlowCollectorLoki, authorization *promConfig.Authorization, vol *volumes.Builder) *promConfig.HTTPClientConfig { +func lokiStackGatewayURL(spec *flowslatest.FlowCollectorLoki) string { + return "https://" + spec.LokiStack.Name + "-gateway-http." + spec.LokiStack.Namespace + ".svc:8080/api/logs/v1/network/" +} +func LokiIngesterURL(spec *flowslatest.FlowCollectorLoki) string { switch spec.Mode { + case flowslatest.LokiModeLokiStack: + return lokiStackGatewayURL(spec) + default: + return spec.Manual.IngesterURL + } +} - case "MANUAL": - { - if spec.Manual.TLS.Enable { - if spec.Manual.TLS.InsecureSkipVerify { - return &promConfig.HTTPClientConfig{ - Authorization: authorization, - TLSConfig: promConfig.TLSConfig{ - InsecureSkipVerify: true, - }, - } - } - caPath := vol.AddCACertificate(&spec.Manual.TLS, "loki-certs") - return &promConfig.HTTPClientConfig{ - Authorization: authorization, - TLSConfig: promConfig.TLSConfig{ - CAFile: caPath, - }, - } +func LokiQuerierURL(spec *flowslatest.FlowCollectorLoki) string { + switch spec.Mode { + case flowslatest.LokiModeLokiStack: + return lokiStackGatewayURL(spec) + default: + if spec.Manual.QuerierURL != "" { + return spec.Manual.QuerierURL + } + return spec.Manual.IngesterURL + } +} - } - return &promConfig.HTTPClientConfig{ - Authorization: authorization, - } +func LokiStatusURL(spec *flowslatest.FlowCollectorLoki) string { + switch spec.Mode { + case flowslatest.LokiModeLokiStack: + return "https://" + spec.LokiStack.Name + "-query-frontend-http." 
+		spec.LokiStack.Namespace + ".svc:3100/"
+	default:
+		if spec.Manual.StatusURL != "" {
+			return spec.Manual.StatusURL
 		}
-	case "LOKISTACK":
-		{
-			certRef := flowslatest.CertificateReference{
+		return LokiQuerierURL(spec)
+	}
+}
+
+func LokiTenantID(spec *flowslatest.FlowCollectorLoki) string {
+	switch spec.Mode {
+	case flowslatest.LokiModeLokiStack:
+		return "network"
+	default:
+		return spec.Manual.TenantID
+	}
+}
+
+func LokiTLS(spec *flowslatest.FlowCollectorLoki) *flowslatest.ClientTLS {
+	switch spec.Mode {
+	case flowslatest.LokiModeLokiStack:
+		clientTLS := &flowslatest.ClientTLS{
+			Enable: true,
+			CACert: flowslatest.CertificateReference{
 				Type:     flowslatest.RefTypeConfigMap,
 				Name:     spec.LokiStack.Name + "-gateway-ca-bundle",
 				CertFile: "service-ca.crt",
-			}
-			clientTLS := &flowslatest.ClientTLS{
-				CACert: certRef,
-			}
-			caPath := vol.AddCACertificate(clientTLS, "loki-certs")
-			return &promConfig.HTTPClientConfig{
-				Authorization: authorization,
-				TLSConfig: promConfig.TLSConfig{
-					CAFile: caPath,
-				}}
+			},
+			InsecureSkipVerify: false,
 		}
+		return clientTLS
+	default:
+		return &spec.Manual.TLS
 	}
+}
 
-	return nil
+func LokiStatusTLS(spec *flowslatest.FlowCollectorLoki) *flowslatest.ClientTLS {
+	switch spec.Mode {
+	case flowslatest.LokiModeLokiStack:
+		clientTLS := &flowslatest.ClientTLS{
+			Enable: true,
+			CACert: flowslatest.CertificateReference{
+				Type:     flowslatest.RefTypeConfigMap,
+				Name:     spec.LokiStack.Name + "-ca-bundle",
+				CertFile: "service-ca.crt",
+			},
+			InsecureSkipVerify: false,
+			UserCert: flowslatest.CertificateReference{
+				Type:     flowslatest.RefTypeSecret,
+				Name:     spec.LokiStack.Name + "-query-frontend-http",
+				CertFile: "tls.crt",
+				CertKey:  "tls.key",
+			},
+		}
+		return clientTLS
+	default:
+		if spec.Manual.StatusURL != "" {
+			return &spec.Manual.StatusTLS
+		}
+		return &spec.Manual.TLS
+	}
 }
 
 func UseConsolePlugin(spec *flowslatest.FlowCollectorSpec) bool {

From c754d39f2e2711ab4c8259869685c4ca8c298735 Mon Sep 17 00:00:00 2001
From: Julien Pinsonneau
Date: Thu, 3 Aug 2023 15:18:06 +0200
Subject: [PATCH 03/17] manual mode as default + optional lokistack

---
 api/v1beta2/flowcollector_types.go            |   10 +-
 api/v1beta2/zz_generated.deepcopy.go          |    6 +-
 .../flows.netobserv.io_flowcollectors.yaml    | 1037 ++++++++++-------
 .../flows.netobserv.io_flowcollectors.yaml    |   10 +-
 .../flowcollector_controller_iso_test.go      |    1 +
 docs/FlowCollector.md                         |   14 +-
 pkg/helper/flowcollector.go                   |   25 +-
 7 files changed, 631 insertions(+), 472 deletions(-)

diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go
index 5058b6afd..9af30383d 100644
--- a/api/v1beta2/flowcollector_types.go
+++ b/api/v1beta2/flowcollector_types.go
@@ -492,6 +492,7 @@ const (
 	LokiAuthForwardUserToken = "FORWARD"
 )
 
+// LokiManualParams defines the parameters to connect to Loki
 type LokiManualParams struct {
 	//+kubebuilder:default:="http://loki:3100/"
 	// `ingesterUrl` is the address of an existing Loki service to push the flows to. When using the Loki Operator,
@@ -538,11 +539,12 @@ type LokiManualParams struct {
 	StatusTLS ClientTLS `json:"statusTls"`
 }
 
+// LokiStack defines the name and namespace of the loki-operator instance
 type LokiStack struct {
 	//+kubebuilder:default:="loki"
-	Name string `json:"name"`
+	Name string `json:"name,omitempty"`
 	//+kubebuilder:default:="netobserv"
-	Namespace string `json:"namespace"`
+	Namespace string `json:"namespace,omitempty"`
 }
 
 const (
@@ -553,7 +555,7 @@ const (
 // FlowCollectorLoki defines the desired state for FlowCollector's Loki client.
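// For illustration, assuming the default LokiStack named "loki" in namespace
// "netobserv", the helpers above are expected to resolve LOKISTACK mode to:
//   ingester/querier URL: https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network/
//   status URL:           https://loki-query-frontend-http.netobserv.svc:3100/
//   tenant ID:            "network"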
 type FlowCollectorLoki struct {
 	//+kubebuilder:validation:Enum=MANUAL;LOKISTACK
-	//+kubebuilder:default:="LOKISTACK"
+	//+kubebuilder:default:="MANUAL"
 	Mode string `json:"mode,omitempty"`
 
 	// Loki configuration for MANUAL mode. This is the most flexible configuration.
@@ -564,7 +566,7 @@ type FlowCollectorLoki struct {
 	// Loki configuration for LOKISTACK mode. This is useful for an easy loki-operator configuration.
 	// It is ignored for the other modes.
 	// +optional
-	LokiStack LokiStack `json:"lokiStack,omitempty"`
+	LokiStack *LokiStack `json:"lokiStack,omitempty"`
 
 	//+kubebuilder:default:=true
 	// enable storing flows to Loki. It is required for the OpenShift Console plugin installation.
diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go
index 13113ccd3..a72359f6b 100644
--- a/api/v1beta2/zz_generated.deepcopy.go
+++ b/api/v1beta2/zz_generated.deepcopy.go
@@ -473,7 +473,11 @@ func (in *FlowCollectorList) DeepCopyObject() runtime.Object {
 func (in *FlowCollectorLoki) DeepCopyInto(out *FlowCollectorLoki) {
 	*out = *in
 	out.Manual = in.Manual
-	out.LokiStack = in.LokiStack
+	if in.LokiStack != nil {
+		in, out := &in.LokiStack, &out.LokiStack
+		*out = new(LokiStack)
+		**out = **in
+	}
 	if in.Enable != nil {
 		in, out := &in.Enable, &out.Enable
 		*out = new(bool)
diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
index e8b80bffa..d19bb3aaa 100644
--- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
+++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
@@ -3706,7 +3706,7 @@ spec:
               - topic
               type: object
             loki:
-              description: Loki, the flow store, client settings.
+              description: loki, the flow store, client settings.
@@ -4936,8 +4936,8 @@ spec:
   - name: v1beta2
    schema:
      openAPIV3Schema:
-        description: FlowCollector is the schema for the network flows collection
-          API, which pilots and configures the underlying deployments.
+        description: '`FlowCollector` is the schema for the network flows collection
+          API, which pilots and configures the underlying deployments.'
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
@@ -4952,102 +4952,113 @@ spec:
          metadata:
            type: object
          spec:
-            description: 'FlowCollectorSpec defines the desired state of FlowCollector.
+            description: 'Defines the desired state of the FlowCollector resource.<br><br>
*: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature is not - officially supported by Red Hat. It may have been, for instance, contributed + officially supported by Red Hat. It might have been, for instance, contributed by the community and accepted without a formal agreement for maintenance. - The product maintainers may provide some support for these features + The product maintainers might provide some support for these features as a best effort only.' properties: agent: - default: - type: EBPF - description: agent for flows extraction. + description: Agent configuration for flows extraction. properties: ebpf: - description: ebpf describes the settings related to the eBPF-based - flow reporter when the "agent.type" property is set to "EBPF". + description: '`ebpf` describes the settings related to the eBPF-based + flow reporter when `spec.agent.type` is set to `EBPF`.' properties: cacheActiveTimeout: default: 5s - description: cacheActiveTimeout is the max period during which - the reporter will aggregate flows before sending. Increasing - `cacheMaxFlows` and `cacheActiveTimeout` can decrease the - network traffic overhead and the CPU load, however you can - expect higher memory consumption and an increased latency - in the flow collection. + description: '`cacheActiveTimeout` is the max period during + which the reporter will aggregate flows before sending. + Increasing `cacheMaxFlows` and `cacheActiveTimeout` can + decrease the network traffic overhead and the CPU load, + however you can expect higher memory consumption and an + increased latency in the flow collection.' pattern: ^\d+(ns|ms|s|m)?$ type: string cacheMaxFlows: default: 100000 - description: cacheMaxFlows is the max number of flows in an - aggregate; when reached, the reporter sends the flows. Increasing - `cacheMaxFlows` and `cacheActiveTimeout` can decrease the - network traffic overhead and the CPU load, however you can - expect higher memory consumption and an increased latency - in the flow collection. + description: '`cacheMaxFlows` is the max number of flows in + an aggregate; when reached, the reporter sends the flows. + Increasing `cacheMaxFlows` and `cacheActiveTimeout` can + decrease the network traffic overhead and the CPU load, + however you can expect higher memory consumption and an + increased latency in the flow collection.' format: int32 minimum: 1 type: integer debug: - description: Debug allows setting some aspects of the internal + description: '`debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively - for debugging and fine-grained performance optimizations - (for example GOGC, GOMAXPROCS env vars). Users setting its - values do it at their own risk. + for debugging and fine-grained performance optimizations, + such as GOGC and GOMAXPROCS env vars. Users setting its + values do it at their own risk.' properties: env: additionalProperties: type: string - description: env allows passing custom environment variables - to the NetObserv Agent. Useful for passing some very - concrete performance-tuning options (such as GOGC, GOMAXPROCS) - that shouldn't be publicly exposed as part of the FlowCollector - descriptor, as they are only useful in edge debug and - support scenarios. + description: '`env` allows passing custom environment + variables to underlying components. 
Useful for passing + some very concrete performance-tuning options, such + as GOGC and GOMAXPROCS, that should not be publicly + exposed as part of the FlowCollector descriptor, as + they are only useful in edge debug or support scenarios.' type: object type: object + enableDNSTracking: + default: false + description: Enable the DNS tracking feature. This feature + requires mounting the kernel debug filesystem hence the + eBPF pod has to run as privileged. If the spec.agent.eBPF.privileged + parameter is not set, an error is reported. + type: boolean + enablePktDrop: + default: false + description: Enable the Packets drop flows logging feature. + This feature requires mounting the kernel debug filesystem, + so the eBPF pod has to run as privileged. If the spec.agent.eBPF.privileged + parameter is not set, an error is reported. + type: boolean excludeInterfaces: default: - lo - description: excludeInterfaces contains the interface names - that will be excluded from flow tracing. If an entry is - enclosed by slashes (such as `/br-/`), it will match as - regular expression, otherwise it will be matched as a case-sensitive - string. + description: '`excludeInterfaces` contains the interface names + that will be excluded from flow tracing. An entry is enclosed + by slashes, such as `/br-/`, is matched as a regular expression. + Otherwise it is matched as a case-sensitive string.' items: type: string type: array imagePullPolicy: default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy - for the image defined above + description: '`imagePullPolicy` is the Kubernetes pull policy + for the image defined above' enum: - IfNotPresent - Always - Never type: string interfaces: - description: interfaces contains the interface names from + description: '`interfaces` contains the interface names from where flows will be collected. If empty, the agent will fetch all the interfaces in the system, excepting the ones - listed in ExcludeInterfaces. If an entry is enclosed by - slashes (such as `/br-/`), it will match as regular expression, - otherwise it will be matched as a case-sensitive string. + listed in ExcludeInterfaces. An entry is enclosed by slashes, + such as `/br-/`, is matched as a regular expression. Otherwise + it is matched as a case-sensitive string.' items: type: string type: array kafkaBatchSize: default: 10485760 - description: 'kafkaBatchSize limits the maximum size of a - request in bytes before being sent to a partition. Ignored + description: '`kafkaBatchSize` limits the maximum size of + a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB.' type: integer logLevel: default: info - description: logLevel defines the log level for the NetObserv - eBPF Agent + description: '`logLevel` defines the log level for the NetObserv + eBPF Agent' enum: - trace - debug @@ -5058,14 +5069,14 @@ spec: - panic type: string privileged: - description: 'privileged mode for the eBPF Agent container. + description: 'Privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: in that case, the operator will set granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable its correct operation. If for some reason these - capabilities cannot be set (for example old kernel version - not knowing CAP_BPF) then you can turn on this mode for - more global privileges.' 
+ capabilities cannot be set, such as if an old kernel version + not knowing CAP_BPF is in use, then you can turn on this + mode for more global privileges.' type: boolean resources: default: @@ -5074,7 +5085,7 @@ spec: requests: cpu: 100m memory: 50Mi - description: 'resources are the compute resources required + description: '`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: limits: @@ -5103,116 +5114,116 @@ spec: type: object sampling: default: 50 - description: sampling rate of the flow reporter. 100 means + description: Sampling rate of the flow reporter. 100 means one flow on 100 is sent. 0 or 1 means all flows are sampled. format: int32 minimum: 0 type: integer type: object ipfix: - description: ipfix - deprecated (*) - describes the settings - related to the IPFIX-based flow reporter when the "agent.type" - property is set to "IPFIX". + description: '`ipfix` - deprecated (*) - describes the + settings related to the IPFIX-based flow reporter when `spec.agent.type` + is set to `IPFIX`.' properties: cacheActiveTimeout: default: 20s - description: cacheActiveTimeout is the max period during which - the reporter will aggregate flows before sending + description: '`cacheActiveTimeout` is the max period during + which the reporter will aggregate flows before sending' pattern: ^\d+(ns|ms|s|m)?$ type: string cacheMaxFlows: default: 400 - description: cacheMaxFlows is the max number of flows in an - aggregate; when reached, the reporter sends the flows + description: '`cacheMaxFlows` is the max number of flows in + an aggregate; when reached, the reporter sends the flows' format: int32 minimum: 0 type: integer clusterNetworkOperator: - description: clusterNetworkOperator defines the settings related - to the OpenShift Cluster Network Operator, when available. + description: '`clusterNetworkOperator` defines the settings + related to the OpenShift Cluster Network Operator, when + available.' properties: namespace: default: openshift-network-operator - description: namespace where the config map is going + description: Namespace where the config map is going to be deployed. type: string type: object forceSampleAll: default: false - description: forceSampleAll allows disabling sampling in the - IPFIX-based flow reporter. It is not recommended to sample - all the traffic with IPFIX, as it might generate cluster - instability. If you REALLY want to do that, set this flag - to true. Use at your own risk. When it is set to true, the - value of "sampling" is ignored. + description: '`forceSampleAll` allows disabling sampling in + the IPFIX-based flow reporter. It is not recommended to + sample all the traffic with IPFIX, as it might generate + cluster instability. If you REALLY want to do that, set + this flag to true. Use at your own risk. When it is set + to true, the value of `sampling` is ignored.' type: boolean ovnKubernetes: - description: ovnKubernetes defines the settings of the OVN-Kubernetes - CNI, when available. This configuration is used when using - OVN's IPFIX exports, without OpenShift. When using OpenShift, - refer to the `clusterNetworkOperator` property instead. + description: '`ovnKubernetes` defines the settings of the + OVN-Kubernetes CNI, when available. This configuration is + used when using OVN''s IPFIX exports, without OpenShift. + When using OpenShift, refer to the `clusterNetworkOperator` + property instead.' 
properties: containerName: default: ovnkube-node - description: containerName defines the name of the container - to configure for IPFIX. + description: '`containerName` defines the name of the + container to configure for IPFIX.' type: string daemonSetName: default: ovnkube-node - description: daemonSetName defines the name of the DaemonSet - controlling the OVN-Kubernetes pods. + description: '`daemonSetName` defines the name of the + DaemonSet controlling the OVN-Kubernetes pods.' type: string namespace: default: ovn-kubernetes - description: namespace where OVN-Kubernetes pods are deployed. + description: Namespace where OVN-Kubernetes pods are deployed. type: string type: object sampling: default: 400 - description: sampling is the sampling rate on the reporter. + description: '`sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster - stability, refer to "forceSampleAll". Alternatively, you - can use the eBPF Agent instead of IPFIX. + stability, refer to `forceSampleAll`. Alternatively, you + can use the eBPF Agent instead of IPFIX.' format: int32 minimum: 2 type: integer type: object type: default: EBPF - description: type selects the flows tracing agent. Possible values - are "EBPF" (default) to use NetObserv eBPF agent, "IPFIX" - - deprecated (*) - to use the legacy IPFIX collector. "EBPF" - is recommended in most cases as it offers better performances + description: '`type` selects the flows tracing agent. Possible + values are:
- `EBPF` (default) to use NetObserv eBPF agent.<br>
+ - `IPFIX` - deprecated (*) - to use the legacy IPFIX + collector.<br>
`EBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. - "IPFIX" works with OVN-Kubernetes CNI (other CNIs could work + `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual - configuration). + configuration).' enum: - EBPF - IPFIX type: string - required: - - type type: object consolePlugin: - description: consolePlugin defines the settings related to the OpenShift - Console plugin, when available. + description: '`consolePlugin` defines the settings related to the + OpenShift Console plugin, when available.' properties: autoscaler: - description: autoscaler spec of a horizontal pod autoscaler to - set up for the plugin Deployment. + description: '`autoscaler` spec of a horizontal pod autoscaler + to set up for the plugin Deployment.' properties: maxReplicas: default: 3 - description: maxReplicas is the upper limit for the number + description: '`maxReplicas` is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller - than MinReplicas. + than MinReplicas.' format: int32 type: integer metrics: - description: metrics used by the pod autoscaler + description: Metrics used by the pod autoscaler items: description: MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field @@ -5709,29 +5720,34 @@ spec: type: object type: array minReplicas: - description: minReplicas is the lower limit for the number - of replicas to which the autoscaler can scale down. It - defaults to 1 pod. minReplicas is allowed to be 0 if the - alpha feature gate HPAScaleToZero is enabled and at least - one Object or External metric is configured. Scaling is - active as long as at least one metric value is available. + description: '`minReplicas` is the lower limit for the number + of replicas to which the autoscaler can scale down. It defaults + to 1 pod. minReplicas is allowed to be 0 if the alpha feature + gate HPAScaleToZero is enabled and at least one Object or + External metric is configured. Scaling is active as long + as at least one metric value is available.' format: int32 type: integer status: default: DISABLED - description: Status describe the desired status regarding - deploying an horizontal pod autoscaler DISABLED will not - deploy an horizontal pod autoscaler ENABLED will deploy - an horizontal pod autoscaler + description: '`status` describes the desired status regarding + deploying an horizontal pod autoscaler.
- `DISABLED` + will not deploy an horizontal pod autoscaler.<br>
- `ENABLED` + will deploy an horizontal pod autoscaler.<br>
' enum: - DISABLED - ENABLED type: string type: object + enable: + default: true + description: enable the console plugin deployment. spec.Loki.enable + must also be true + type: boolean imagePullPolicy: default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy for - the image defined above + description: '`imagePullPolicy` is the Kubernetes pull policy + for the image defined above' enum: - IfNotPresent - Always @@ -5739,7 +5755,7 @@ spec: type: string logLevel: default: info - description: logLevel for the console plugin backend + description: '`logLevel` for the console plugin backend' enum: - trace - debug @@ -5751,8 +5767,8 @@ spec: type: string port: default: 9001 - description: port is the plugin service port. Do not use 9002, - which is reserved for metrics. + description: '`port` is the plugin service port. Do not use 9002, + which is reserved for metrics.' format: int32 maximum: 65535 minimum: 1 @@ -5760,19 +5776,19 @@ spec: portNaming: default: enable: true - description: portNaming defines the configuration of the port-to-service - name translation + description: '`portNaming` defines the configuration of the port-to-service + name translation' properties: enable: default: true - description: enable the console plugin port-to-service name + description: Enable the console plugin port-to-service name translation type: boolean portNames: additionalProperties: type: string - description: 'portNames defines additional port names to use - in the console. Example: portNames: {"3100": "loki"}' + description: '`portNames` defines additional port names to + use in the console, for example, `portNames: {"3100": "loki"}`.' type: object type: object quickFilters: @@ -5794,26 +5810,26 @@ spec: - filter: dst_kind: Service name: Services network - description: quickFilters configures quick filter presets for - the Console plugin + description: '`quickFilters` configures quick filter presets for + the Console plugin' items: - description: QuickFilter defines preset configuration for Console's - quick filters + description: '`QuickFilter` defines preset configuration for + Console''s quick filters' properties: default: - description: default defines whether this filter should - be active by default or not + description: '`default` defines whether this filter should + be active by default or not' type: boolean filter: additionalProperties: type: string - description: 'filter is a set of keys and values to be set - when this filter is selected. Each key can relate to a - list of values using a coma-separated string. Example: - filter: {"src_namespace": "namespace1,namespace2"}' + description: '`filter` is a set of keys and values to be + set when this filter is selected. Each key can relate + to a list of values using a coma-separated string, for + example, `filter: {"src_namespace": "namespace1,namespace2"}`.' type: object name: - description: name of the filter, that will be displayed + description: Name of the filter, that will be displayed in Console type: string required: @@ -5823,17 +5839,17 @@ spec: type: array register: default: true - description: 'register allows, when set to true, to automatically + description: '`register` allows, when set to true, to automatically register the provided console plugin with the OpenShift Console operator. When set to false, you can still register it manually - by editing console.operator.openshift.io/cluster. 
E.g: oc patch - console.operator.openshift.io cluster --type=''json'' -p ''[{"op": - "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''' + by editing console.operator.openshift.io/cluster with the following + command: `oc patch console.operator.openshift.io cluster --type=''json'' + -p ''[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''`' type: boolean replicas: default: 1 - description: replicas defines the number of replicas (pods) to - start. + description: '`replicas` defines the number of replicas (pods) + to start.' format: int32 minimum: 0 type: integer @@ -5844,7 +5860,7 @@ spec: requests: cpu: 100m memory: 50Mi - description: 'resources, in terms of compute resources, required + description: '`resources`, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: limits: @@ -5873,22 +5889,22 @@ spec: type: object deploymentModel: default: DIRECT - description: deploymentModel defines the desired type of deployment - for flow processing. Possible values are "DIRECT" (default) to make - the flow processor listening directly from the agents, or "KAFKA" - to make flows sent to a Kafka pipeline before consumption by the - processor. Kafka can provide better scalability, resiliency and - high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka). + description: '`deploymentModel` defines the desired type of deployment + for flow processing. Possible values are:
- `DIRECT` (default) + to make the flow processor listen directly from the agents.<br>
+ - `KAFKA` to send flows to a Kafka pipeline before consumption + by the processor.<br>
Kafka can provide better scalability, resiliency, + and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).' enum: - DIRECT - KAFKA type: string exporters: - description: exporters define additional optional exporters for custom - consumption or storage. + description: '`exporters` define additional optional exporters for + custom consumption or storage.' items: - description: FlowCollectorExporter defines an additional exporter - to send enriched flows to. + description: '`FlowCollectorExporter` defines an additional exporter + to send enriched flows to.' properties: ipfix: description: IPFIX configuration, such as the IP address and @@ -5896,14 +5912,14 @@ spec: properties: targetHost: default: "" - description: address of the ipfix external receiver + description: Address of the IPFIX external receiver type: string targetPort: - description: port for the ipfix external receiver + description: Port for the IPFIX external receiver type: integer transport: - description: Transport protocol (tcp/udp) to be used for - the IPFIX connection, defaults to tcp + description: Transport protocol (`TCP` or `UDP`) to be used + for the IPFIX connection, defaults to `TCP`. enum: - TCP - UDP @@ -5913,49 +5929,93 @@ spec: - targetPort type: object kafka: - description: kafka configuration, such as the address and topic, + description: Kafka configuration, such as the address and topic, to send enriched flows to. properties: address: default: "" - description: address of the Kafka server + description: Address of the Kafka server type: string + sasl: + description: SASL authentication configuration. Unsupported + (*) + properties: + clientIDKey: + description: Key for client ID within the provided `reference` + type: string + clientSecretKey: + description: Key for client secret within the provided + `reference` + type: string + reference: + description: Reference to the secret or config map containing + the client ID and secret + properties: + name: + description: Name of the config map or secret to + reference + type: string + namespace: + default: "" + description: Namespace of the config map or secret. + If omitted, assumes same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret will be copied so that + it can be mounted as required. + type: string + type: + description: 'Type for the reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: + default: DISABLED + description: Type of SASL authentication to use, or + `DISABLED` if SASL is not used + enum: + - DISABLED + - PLAIN + - SCRAM-SHA512 + type: string + type: object tls: - description: tls client configuration. When using TLS, verify + description: TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, - generally 9093. Note that, when eBPF agents are used, - Kafka certificate needs to be copied in the agent namespace - (by default it's netobserv-privileged). + generally 9093. 
properties: caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority + description: '`caCert` defines the reference of the + certificate for the Certificate Authority' properties: certFile: - description: certFile defines the path to the certificate - file name within the config map or secret + description: '`certFile` defines the path to the + certificate file name within the config map or + secret' type: string certKey: - description: certKey defines the path to the certificate - private key file name within the config map or - secret. Omit when the key is not necessary. + description: '`certKey` defines the path to the + certificate private key file name within the config + map or secret. Omit when the key is not necessary.' type: string name: - description: name of the config map or secret containing + description: Name of the config map or secret containing certificates type: string namespace: default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. + description: Namespace of the config map or secret + containing certificates. If omitted, assumes the + same namespace as where NetObserv is deployed. + If the namespace is different, the config map + or the secret will be copied so that it can be + mounted as required. type: string type: - description: 'type for the certificate reference: - "configmap" or "secret"' + description: 'Type for the certificate reference: + `configmap` or `secret`' enum: - configmap - secret @@ -5963,44 +6023,45 @@ spec: type: object enable: default: false - description: enable TLS + description: Enable TLS type: boolean insecureSkipVerify: default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, - CACert field will be ignored + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to + true, the `caCert` field is ignored.' type: boolean userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, - one-way TLS) + description: '`userCert` defines the user certificate + reference and is used for mTLS (you can ignore it + when using one-way TLS)' properties: certFile: - description: certFile defines the path to the certificate - file name within the config map or secret + description: '`certFile` defines the path to the + certificate file name within the config map or + secret' type: string certKey: - description: certKey defines the path to the certificate - private key file name within the config map or - secret. Omit when the key is not necessary. + description: '`certKey` defines the path to the + certificate private key file name within the config + map or secret. Omit when the key is not necessary.' type: string name: - description: name of the config map or secret containing + description: Name of the config map or secret containing certificates type: string namespace: default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. 
+ description: Namespace of the config map or secret + containing certificates. If omitted, assumes the + same namespace as where NetObserv is deployed. + If the namespace is different, the config map + or the secret will be copied so that it can be + mounted as required. type: string type: - description: 'type for the certificate reference: - "configmap" or "secret"' + description: 'Type for the certificate reference: + `configmap` or `secret`' enum: - configmap - secret @@ -6009,7 +6070,7 @@ spec: type: object topic: default: "" - description: kafka topic to use. It must exist, NetObserv + description: Kafka topic to use. It must exist, NetObserv will not create it. type: string required: @@ -6017,9 +6078,9 @@ spec: - topic type: object type: - description: type selects the type of exporters. The available - options are "KAFKA" and "IPFIX". "IPFIX" is unsupported - (*). + description: '`type` selects the type of exporters. The available + options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported + (*).' enum: - KAFKA - IPFIX @@ -6029,48 +6090,89 @@ spec: type: object type: array kafka: - description: kafka configuration, allowing to use Kafka as a broker - as part of the flow collection pipeline. Available when the "spec.deploymentModel" - is "KAFKA". + description: Kafka configuration, allowing to use Kafka as a broker + as part of the flow collection pipeline. Available when the `spec.deploymentModel` + is `KAFKA`. properties: address: default: "" - description: address of the Kafka server + description: Address of the Kafka server type: string + sasl: + description: SASL authentication configuration. Unsupported + (*) + properties: + clientIDKey: + description: Key for client ID within the provided `reference` + type: string + clientSecretKey: + description: Key for client secret within the provided `reference` + type: string + reference: + description: Reference to the secret or config map containing + the client ID and secret + properties: + name: + description: Name of the config map or secret to reference + type: string + namespace: + default: "" + description: Namespace of the config map or secret. If + omitted, assumes same namespace as where NetObserv is + deployed. If the namespace is different, the config + map or the secret will be copied so that it can be mounted + as required. + type: string + type: + description: 'Type for the reference: "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: + default: DISABLED + description: Type of SASL authentication to use, or `DISABLED` + if SASL is not used + enum: + - DISABLED + - PLAIN + - SCRAM-SHA512 + type: string + type: object tls: - description: tls client configuration. When using TLS, verify + description: TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally - 9093. Note that, when eBPF agents are used, Kafka certificate - needs to be copied in the agent namespace (by default it's netobserv-privileged). + 9093. 
properties: caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority + description: '`caCert` defines the reference of the certificate + for the Certificate Authority' properties: certFile: - description: certFile defines the path to the certificate - file name within the config map or secret + description: '`certFile` defines the path to the certificate + file name within the config map or secret' type: string certKey: - description: certKey defines the path to the certificate + description: '`certKey` defines the path to the certificate private key file name within the config map or secret. - Omit when the key is not necessary. + Omit when the key is not necessary.' type: string name: - description: name of the config map or secret containing + description: Name of the config map or secret containing certificates type: string namespace: default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. + description: Namespace of the config map or secret containing + certificates. If omitted, assumes the same namespace + as where NetObserv is deployed. If the namespace is + different, the config map or the secret will be copied + so that it can be mounted as required. type: string type: - description: 'type for the certificate reference: "configmap" - or "secret"' + description: 'Type for the certificate reference: `configmap` + or `secret`' enum: - configmap - secret @@ -6078,43 +6180,43 @@ spec: type: object enable: default: false - description: enable TLS + description: Enable TLS type: boolean insecureSkipVerify: default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, CACert - field will be ignored + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to true, + the `caCert` field is ignored.' type: boolean userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, one-way - TLS) + description: '`userCert` defines the user certificate reference + and is used for mTLS (you can ignore it when using one-way + TLS)' properties: certFile: - description: certFile defines the path to the certificate - file name within the config map or secret + description: '`certFile` defines the path to the certificate + file name within the config map or secret' type: string certKey: - description: certKey defines the path to the certificate + description: '`certKey` defines the path to the certificate private key file name within the config map or secret. - Omit when the key is not necessary. + Omit when the key is not necessary.' type: string name: - description: name of the config map or secret containing + description: Name of the config map or secret containing certificates type: string namespace: default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. + description: Namespace of the config map or secret containing + certificates. If omitted, assumes the same namespace + as where NetObserv is deployed. 
If the namespace is + different, the config map or the secret will be copied so that + it can be mounted as required. type: string type: - description: 'type for the certificate reference: - "configmap" or "secret"' + description: 'Type for the certificate reference: + `configmap` or `secret`' enum: - configmap - secret @@ -6123,7 +6225,7 @@ spec: type: object topic: default: "" - description: kafka topic to use. It must exist, NetObserv will + description: Kafka topic to use. It must exist, NetObserv will not create it. type: string required: @@ -6135,69 +6237,98 @@ spec: properties: batchSize: default: 102400 - description: batchSize is max batch size (in bytes) of logs to - accumulate before sending. + description: '`batchSize` is the maximum batch size (in bytes) + of logs to accumulate before sending.' format: int64 minimum: 1 type: integer batchWait: default: 1s - description: batchWait is max time to wait before sending a batch. + description: '`batchWait` is the maximum time to wait before sending + a batch.' type: string + enable: + default: true + description: enable storing flows to Loki. It is required for + the OpenShift Console plugin installation. + type: boolean + lokiStack: + description: Loki configuration for LOKISTACK mode. This is useful + for an easy loki-operator config. It will be ignored for other + modes + properties: + name: + default: loki + type: string + namespace: + default: netobserv + type: string + type: object manual: + description: Loki configuration for MANUAL mode. This is the more + flexible configuration. It will be ignored for other modes properties: authToken: default: DISABLED - description: AuthToken describe the way to get a token to - authenticate to Loki. DISABLED will not send any token with - the request. HOST - deprecated (*) - will use the - local pod service account to authenticate to Loki. FORWARD - will forward the user token for authorization. When using - the Loki Operator, this should be set to `FORWARD`. + description: '`authToken` describes the way to get a token + to authenticate to Loki.
- `DISABLED` will not send + any token with the request.
- `FORWARD` will forward + the user token for authorization.
- `HOST` - deprecated + (*) - will use the local pod service account to authenticate + to Loki.
When using the Loki Operator, this must be + set to `FORWARD`.' enum: - DISABLED - HOST - FORWARD type: string + ingesterUrl: + default: http://loki:3100/ + description: '`ingesterUrl` is the address of an existing + Loki service to push the flows to. When using the Loki Operator, + set it to the Loki gateway service with the `network` tenant + set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.' + type: string querierUrl: - description: querierURL specifies the address of the Loki + description: '`querierURL` specifies the address of the Loki querier service, in case it is different from the Loki ingester URL. If empty, the URL value will be used (assuming that the Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion - and queries use the Loki gateway. + and queries use the Loki gateway.' type: string statusTls: - description: tls client configuration for loki status URL. + description: TLS client configuration for Loki status URL. properties: caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority + description: '`caCert` defines the reference of the certificate + for the Certificate Authority' properties: certFile: - description: certFile defines the path to the certificate - file name within the config map or secret + description: '`certFile` defines the path to the certificate + file name within the config map or secret' type: string certKey: - description: certKey defines the path to the certificate + description: '`certKey` defines the path to the certificate private key file name within the config map or secret. - Omit when the key is not necessary. + Omit when the key is not necessary.' type: string name: - description: name of the config map or secret containing + description: Name of the config map or secret containing certificates type: string namespace: default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the secret - will be copied so that it can be mounted as required. + description: Namespace of the config map or secret + containing certificates. If omitted, assumes the + same namespace as where NetObserv is deployed. If + the namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. type: string type: - description: 'type for the certificate reference: - "configmap" or "secret"' + description: 'Type for the certificate reference: + `configmap` or `secret`' enum: - configmap - secret @@ -6205,43 +6336,44 @@ spec: type: object enable: default: false - description: enable TLS + description: Enable TLS type: boolean insecureSkipVerify: default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, - CACert field will be ignored + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to true, + the `caCert` field is ignored.' 
type: boolean userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, - one-way TLS) + description: '`userCert` defines the user certificate + reference and is used for mTLS (you can ignore it when + using one-way TLS)' properties: certFile: - description: certFile defines the path to the certificate - file name within the config map or secret + description: '`certFile` defines the path to the certificate + file name within the config map or secret' type: string certKey: - description: certKey defines the path to the certificate + description: '`certKey` defines the path to the certificate private key file name within the config map or secret. - Omit when the key is not necessary. + Omit when the key is not necessary.' type: string name: - description: name of the config map or secret containing + description: Name of the config map or secret containing certificates type: string namespace: default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the secret - will be copied so that it can be mounted as required. + description: Namespace of the config map or secret + containing certificates. If omitted, assumes the + same namespace as where NetObserv is deployed. If + the namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. type: string type: - description: 'type for the certificate reference: - "configmap" or "secret"' + description: 'Type for the certificate reference: + `configmap` or `secret`' enum: - configmap - secret @@ -6249,53 +6381,54 @@ spec: type: object type: object statusUrl: - description: statusURL specifies the address of the Loki /ready - /metrics /config endpoints, in case it is different from - the Loki querier URL. If empty, the QuerierURL value will - be used. This is useful to show error messages and some - context in the frontend. When using the Loki Operator, set - it to the Loki HTTP query frontend service, for example - https://loki-query-frontend-http.netobserv.svc:3100/. statusTLS - configuration will be used when statusUrl is set. + description: '`statusURL` specifies the address of the Loki + `/ready`, `/metrics` and `/config` endpoints, in case it + is different from the Loki querier URL. If empty, the `querierURL` + value will be used. This is useful to show error messages + and some context in the frontend. When using the Loki Operator, + set it to the Loki HTTP query frontend service, for example + https://loki-query-frontend-http.netobserv.svc:3100/. `statusTLS` + configuration will be used when `statusUrl` is set.' type: string tenantID: default: netobserv - description: tenantID is the Loki X-Scope-OrgID that identifies - the tenant for each request. When using the Loki Operator, - set it to `network`, which corresponds to a special tenant - mode. + description: '`tenantID` is the Loki `X-Scope-OrgID` that + identifies the tenant for each request. When using the Loki + Operator, set it to `network`, which corresponds to a special + tenant mode.' type: string tls: - description: tls client configuration for loki URL. + description: TLS client configuration for Loki URL. 
properties: caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority + description: '`caCert` defines the reference of the certificate + for the Certificate Authority' properties: certFile: - description: certFile defines the path to the certificate - file name within the config map or secret + description: '`certFile` defines the path to the certificate + file name within the config map or secret' type: string certKey: - description: certKey defines the path to the certificate + description: '`certKey` defines the path to the certificate private key file name within the config map or secret. - Omit when the key is not necessary. + Omit when the key is not necessary.' type: string name: - description: name of the config map or secret containing + description: Name of the config map or secret containing certificates type: string namespace: default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the secret - will be copied so that it can be mounted as required. + description: Namespace of the config map or secret + containing certificates. If omitted, assumes the + same namespace as where NetObserv is deployed. If + the namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. type: string type: - description: 'type for the certificate reference: - "configmap" or "secret"' + description: 'Type for the certificate reference: + `configmap` or `secret`' enum: - configmap - secret @@ -6303,177 +6436,178 @@ spec: type: object enable: default: false - description: enable TLS + description: Enable TLS type: boolean insecureSkipVerify: default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, - CACert field will be ignored + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to true, + the `caCert` field is ignored.' type: boolean userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, - one-way TLS) + description: '`userCert` defines the user certificate + reference and is used for mTLS (you can ignore it when + using one-way TLS)' properties: certFile: - description: certFile defines the path to the certificate - file name within the config map or secret + description: '`certFile` defines the path to the certificate + file name within the config map or secret' type: string certKey: - description: certKey defines the path to the certificate + description: '`certKey` defines the path to the certificate private key file name within the config map or secret. - Omit when the key is not necessary. + Omit when the key is not necessary.' type: string name: - description: name of the config map or secret containing + description: Name of the config map or secret containing certificates type: string namespace: default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the secret - will be copied so that it can be mounted as required. + description: Namespace of the config map or secret + containing certificates. If omitted, assumes the + same namespace as where NetObserv is deployed. 
If + the namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. type: string type: - description: 'type for the certificate reference: - "configmap" or "secret"' + description: 'Type for the certificate reference: + `configmap` or `secret`' enum: - configmap - secret type: string type: object type: object - url: - default: http://loki:3100/ - description: url is the address of an existing Loki service - to push the flows to. When using the Loki Operator, set - it to the Loki gateway service with the `network` tenant - set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network. - type: string type: object maxBackoff: default: 5s - description: maxBackoff is the maximum backoff time for client - connection between retries. + description: '`maxBackoff` is the maximum backoff time for client + connection between retries.' type: string maxRetries: default: 2 - description: maxRetries is the maximum number of retries for client - connections. + description: '`maxRetries` is the maximum number of retries for + client connections.' format: int32 minimum: 0 type: integer minBackoff: default: 1s - description: minBackoff is the initial backoff time for client - connection between retries. + description: '`minBackoff` is the initial backoff time for client + connection between retries.' type: string mode: + default: MANUAL enum: - MANUAL + - LOKISTACK type: string staticLabels: additionalProperties: type: string default: app: netobserv-flowcollector - description: staticLabels is a map of common labels to set on - each flow. + description: '`staticLabels` is a map of common labels to set + on each flow.' type: object timeout: default: 10s - description: timeout is the maximum time connection / request - limit. A Timeout of zero means no timeout. + description: '`timeout` is the maximum time connection / request + limit. A timeout of zero means no timeout.' type: string type: object namespace: - description: namespace where NetObserv pods are deployed. If empty, + default: netobserv + description: Namespace where NetObserv pods are deployed. If empty, the namespace of the operator is going to be used. type: string processor: - description: processor defines the settings of the component that - receives the flows from the agent, enriches them, and forwards them - to the Loki persistence layer. + description: '`processor` defines the settings of the component that + receives the flows from the agent, enriches them, generates metrics, + and forwards them to the Loki persistence layer and/or any available + exporter.' properties: conversationEndTimeout: default: 10s - description: conversation end timeout is the duration of time - to wait from the last flow log to end a conversation + description: '`conversationEndTimeout` is the time to wait after + a network flow is received, to consider the conversation ended. + This delay is ignored when a FIN packet is collected for TCP + flows (see `conversationTerminatingTimeout` instead).' 
type: string conversationHeartbeatInterval: default: 30s - description: conversation heartbeat interval is the duration of - time to wait between heartbeat reports of a conversation + description: '`conversationHeartbeatInterval` is the time to wait + between "tick" events of a conversation' type: string conversationTerminatingTimeout: default: 5s - description: conversation terminating timeout is the duration - of time to wait from detected FIN flag to end a connection + description: '`conversationTerminatingTimeout` is the time to + wait from detected FIN flag to end a conversation. Only relevant + for TCP flows.' type: string debug: - description: Debug allows setting some aspects of the internal + description: '`debug` allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively - for debugging and fine-grained performance optimizations (for - example GOGC, GOMAXPROCS env vars). Users setting its values - do it at their own risk. + for debugging and fine-grained performance optimizations, such + as GOGC and GOMAXPROCS env vars. Users setting its values do + it at their own risk.' properties: env: additionalProperties: type: string - description: env allows passing custom environment variables - to the NetObserv Agent. Useful for passing some very concrete - performance-tuning options (such as GOGC, GOMAXPROCS) that - shouldn't be publicly exposed as part of the FlowCollector - descriptor, as they are only useful in edge debug and support - scenarios. + description: '`env` allows passing custom environment variables + to underlying components. Useful for passing some very concrete + performance-tuning options, such as GOGC and GOMAXPROCS, + that should not be publicly exposed as part of the FlowCollector + descriptor, as they are only useful in edge debug or support + scenarios.' type: object type: object dropUnusedFields: default: true - description: dropUnusedFields allows, when set to true, to drop - fields that are known to be unused by OVS, in order to save - storage space. + description: '`dropUnusedFields` allows, when set to true, to + drop fields that are known to be unused by OVS, to save storage + space.' type: boolean enableKubeProbes: default: true - description: enableKubeProbes is a flag to enable or disable Kubernetes - liveness and readiness probes + description: '`enableKubeProbes` is a flag to enable or disable + Kubernetes liveness and readiness probes' type: boolean healthPort: default: 8080 - description: healthPort is a collector HTTP port in the Pod that - exposes the health check API + description: '`healthPort` is a collector HTTP port in the Pod + that exposes the health check API' format: int32 maximum: 65535 minimum: 1 type: integer imagePullPolicy: default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy for - the image defined above + description: '`imagePullPolicy` is the Kubernetes pull policy + for the image defined above' enum: - IfNotPresent - Always - Never type: string kafkaConsumerAutoscaler: - description: kafkaConsumerAutoscaler spec of a horizontal pod - autoscaler to set up for flowlogs-pipeline-transformer, which - consumes Kafka messages. This setting is ignored when Kafka - is disabled. + description: '`kafkaConsumerAutoscaler` is the spec of a horizontal + pod autoscaler to set up for `flowlogs-pipeline-transformer`, + which consumes Kafka messages. This setting is ignored when + Kafka is disabled.' 
properties: maxReplicas: default: 3 - description: maxReplicas is the upper limit for the number + description: '`maxReplicas` is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller - than MinReplicas. + than MinReplicas.' format: int32 type: integer metrics: - description: metrics used by the pod autoscaler + description: Metrics used by the pod autoscaler items: description: MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field @@ -6970,20 +7104,20 @@ spec: type: object type: array minReplicas: - description: minReplicas is the lower limit for the number - of replicas to which the autoscaler can scale down. It - defaults to 1 pod. minReplicas is allowed to be 0 if the - alpha feature gate HPAScaleToZero is enabled and at least - one Object or External metric is configured. Scaling is - active as long as at least one metric value is available. + description: '`minReplicas` is the lower limit for the number + of replicas to which the autoscaler can scale down. It defaults + to 1 pod. minReplicas is allowed to be 0 if the alpha feature + gate HPAScaleToZero is enabled and at least one Object or + External metric is configured. Scaling is active as long + as at least one metric value is available.' format: int32 type: integer status: default: DISABLED - description: Status describe the desired status regarding - deploying an horizontal pod autoscaler DISABLED will not - deploy an horizontal pod autoscaler ENABLED will deploy - an horizontal pod autoscaler + description: '`status` describes the desired status regarding + deploying an horizontal pod autoscaler.
- `DISABLED` + will not deploy an horizontal pod autoscaler.
- `ENABLED` + will deploy an horizontal pod autoscaler.
' enum: - DISABLED - ENABLED @@ -6991,27 +7125,27 @@ spec: type: object kafkaConsumerBatchSize: default: 10485760 - description: 'kafkaConsumerBatchSize indicates to the broker the - maximum batch size, in bytes, that the consumer will accept. + description: '`kafkaConsumerBatchSize` indicates to the broker + the maximum batch size, in bytes, that the consumer will accept. Ignored when not using Kafka. Default: 10MB.' type: integer kafkaConsumerQueueCapacity: default: 1000 - description: kafkaConsumerQueueCapacity defines the capacity of - the internal message queue used in the Kafka consumer client. - Ignored when not using Kafka. + description: '`kafkaConsumerQueueCapacity` defines the capacity + of the internal message queue used in the Kafka consumer client. + Ignored when not using Kafka.' type: integer kafkaConsumerReplicas: default: 3 - description: kafkaConsumerReplicas defines the number of replicas - (pods) to start for flowlogs-pipeline-transformer, which consumes - Kafka messages. This setting is ignored when Kafka is disabled. + description: '`kafkaConsumerReplicas` defines the number of replicas + (pods) to start for `flowlogs-pipeline-transformer`, which consumes + Kafka messages. This setting is ignored when Kafka is disabled.' format: int32 minimum: 0 type: integer logLevel: default: info - description: logLevel of the collector runtime + description: '`logLevel` of the processor runtime' enum: - trace - debug @@ -7023,11 +7157,13 @@ spec: type: string logTypes: default: FLOWS - description: logTypes defines the desired record types to generate. - Possible values are "FLOWS" (default) to export flowLogs, "CONVERSATIONS" - to generate newConnection, heartbeat, endConnection events, - "ENDED_CONVERSATIONS" to generate only endConnection events - or "ALL" to generate both flow logs and conversations events + description: '`logTypes` defines the desired record types to generate. + Possible values are:
- `FLOWS` (default) to export regular + network flows
- `CONVERSATIONS` to generate events for started + conversations, ended conversations as well as periodic "tick" + updates
- `ENDED_CONVERSATIONS` to generate only ended conversations + events
- `ALL` to generate both network flows and all conversations + events
' enum: - FLOWS - CONVERSATIONS @@ -7035,21 +7171,21 @@ spec: - ALL type: string metrics: - description: Metrics define the processor configuration regarding - metrics + description: '`Metrics` define the processor configuration regarding + metrics' properties: disableAlerts: - description: 'disableAlerts is a list of alerts that should - be disabled. Possible values are: `NetObservNoFlows`, which - is triggered when no flows are being observed for a certain - period. `NetObservLokiError`, which is triggered when flows - are being dropped due to Loki errors.' + description: '`disableAlerts` is a list of alerts that should + be disabled. Possible values are:
`NetObservNoFlows`, + which is triggered when no flows are being observed for + a certain period.
`NetObservLokiError`, which is triggered + when flows are being dropped due to Loki errors.
' items: - description: 'Name of a processor alert. Possible values - are: `NetObservNoFlows`, which is triggered when no flows - are being observed for a certain period. `NetObservLokiError`, - which is triggered when flows are being dropped due to - Loki errors.' + description: Name of a processor alert. Possible values + are:
- `NetObservNoFlows`, which is triggered when + no flows are being observed for a certain period.
+ - `NetObservLokiError`, which is triggered when flows + are being dropped due to Loki errors.
enum: - NetObservNoFlows - NetObservLokiError @@ -7059,21 +7195,21 @@ spec: default: - egress - packets - description: 'ignoreTags is a list of tags to specify which + description: '`ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions - . Available tags are: egress, ingress, flows, bytes, packets, - namespaces, nodes, workloads' + . Available tags are: `egress`, `ingress`, `flows`, `bytes`, + `packets`, `namespaces`, `nodes`, `workloads`.' items: type: string type: array server: - description: metricsServer endpoint configuration for Prometheus + description: Metrics server endpoint configuration for Prometheus scraper properties: port: default: 9102 - description: the prometheus HTTP port + description: The prometheus HTTP port format: int32 maximum: 65535 minimum: 1 @@ -7082,34 +7218,36 @@ spec: description: TLS configuration. properties: provided: - description: TLS configuration. + description: TLS configuration when `type` is set + to `PROVIDED`. properties: certFile: - description: certFile defines the path to the + description: '`certFile` defines the path to the certificate file name within the config map - or secret + or secret' type: string certKey: - description: certKey defines the path to the certificate - private key file name within the config map - or secret. Omit when the key is not necessary. + description: '`certKey` defines the path to the + certificate private key file name within the + config map or secret. Omit when the key is not + necessary.' type: string name: - description: name of the config map or secret + description: Name of the config map or secret containing certificates type: string namespace: default: "" - description: namespace of the config map or secret + description: Namespace of the config map or secret containing certificates. If omitted, assumes - same namespace as where NetObserv is deployed. + the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. type: string type: - description: 'type for the certificate reference: - "configmap" or "secret"' + description: 'Type for the certificate reference: + `configmap` or `secret`' enum: - configmap - secret @@ -7117,11 +7255,11 @@ spec: type: object type: default: DISABLED - description: Select the type of TLS configuration - "DISABLED" (default) to not configure TLS for the - endpoint, "PROVIDED" to manually provide cert file - and a key file, and "AUTO" to use OpenShift auto - generated certificate using annotations + description: Select the type of TLS configuration:
+ - `DISABLED` (default) to not configure TLS for + the endpoint. - `PROVIDED` to manually provide cert + file and a key file. - `AUTO` to use OpenShift auto + generated certificate using annotations. enum: - DISABLED - PROVIDED @@ -7132,16 +7270,16 @@ spec: type: object port: default: 2055 - description: 'port of the flow collector (host port) By conventions, - some value are not authorized port must not be below 1024 and - must not equal this values: 4789,6081,500, and 4500' + description: Port of the flow collector (host port). By convention, + some values are forbidden. It must be greater than 1024 and + different from 4500, 4789 and 6081. format: int32 maximum: 65535 minimum: 1025 type: integer profilePort: - description: profilePort allows setting up a Go pprof profiler - listening to this port + description: '`profilePort` allows setting up a Go pprof profiler + listening to this port' format: int32 maximum: 65535 minimum: 0 @@ -7153,7 +7291,7 @@ spec: requests: cpu: 100m memory: 100Mi - description: 'resources are the compute resources required by + description: '`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: limits: @@ -7180,16 +7318,13 @@ spec: type: object type: object type: object - required: - - agent - - deploymentModel type: object status: - description: FlowCollectorStatus defines the observed state of FlowCollector + description: '`FlowCollectorStatus` defines the observed state of FlowCollector' properties: conditions: - description: conditions represent the latest available observations - of an object's state + description: '`conditions` represent the latest available observations + of an object''s state' items: description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct @@ -7259,7 +7394,7 @@ spec: type: object type: array namespace: - description: namespace where console plugin and flowlogs-pipeline + description: Namespace where console plugin and flowlogs-pipeline have been deployed. type: string required: diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index e4ffc60e1..0e8c84ff1 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -6240,6 +6240,9 @@ spec: the OpenShift Console plugin installation. type: boolean lokiStack: + description: Loki configuration for LOKISTACK mode. This is usefull + for an easy loki-operator config. It will be ignored for other + mods properties: name: default: loki @@ -6247,11 +6250,10 @@ spec: namespace: default: netobserv type: string - required: - - name - - namespace type: object manual: + description: Loki configuration for MANUAL mode. This is the more + flexible configuration. It will be ignored for other mods properties: authToken: default: DISABLED @@ -6484,7 +6486,7 @@ spec: connection between retries.' 
type: string mode: - default: LOKISTACK + default: MANUAL enum: - MANUAL - LOKISTACK diff --git a/controllers/flowcollector_controller_iso_test.go b/controllers/flowcollector_controller_iso_test.go index 3033dee78..51ae6b03b 100644 --- a/controllers/flowcollector_controller_iso_test.go +++ b/controllers/flowcollector_controller_iso_test.go @@ -156,6 +156,7 @@ func flowCollectorIsoSpecs() { }, }, Enable: pointer.Bool(true), + Mode: flowslatest.LokiModeManual, BatchWait: &metav1.Duration{Duration: time.Second}, BatchSize: 100, Timeout: &metav1.Duration{Duration: time.Second}, diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index 533eb032a..251300cfe 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -11032,14 +11032,14 @@ loki, the flow store, client settings.
@@ -11078,7 +11078,7 @@ loki, the flow store, client settings.
[docs/FlowCollector.md HTML table rows reduced to their cell text]

           Enum: MANUAL, LOKISTACK
-          Default: LOKISTACK
+          Default: MANUAL
@@ -11108,7 +11108,7 @@ loki, the flow store, client settings.
           lokiStack (object)
-
+          Loki configuration for LOKISTACK mode. This is useful for an easy loki-operator config. It will be ignored for other modes.
           false
           manual (object)
-
+          Loki configuration for MANUAL mode. This is the more flexible configuration. It will be ignored for other modes.
           false
@@ -11127,7 +11127,7 @@ loki, the flow store, client settings.
           name (string), Default: loki
-          true
+          false
           namespace (string), Default: netobserv
-          true
+          false
namespace string truefalse
@@ -11146,7 +11146,7 @@ loki, the flow store, client settings. - +Loki configuration for MANUAL mode. This is the more flexible configuration. It will be ignored for other mods diff --git a/pkg/helper/flowcollector.go b/pkg/helper/flowcollector.go index 78369f217..d1a9825b2 100644 --- a/pkg/helper/flowcollector.go +++ b/pkg/helper/flowcollector.go @@ -110,8 +110,21 @@ func LokiForwardUserToken(spec *flowslatest.FlowCollectorLoki) bool { } } +func getLokiStackNameAndNamespace(spec *flowslatest.LokiStack) (string, string) { + if spec != nil { + return spec.Name, spec.Namespace + } + return "loki", "netobserv" +} + func lokiStackGatewayURL(spec *flowslatest.FlowCollectorLoki) string { - return "https://" + spec.LokiStack.Name + "-gateway-http." + spec.LokiStack.Namespace + ".svc:8080/api/logs/v1/network/" + name, namespace := getLokiStackNameAndNamespace(spec.LokiStack) + return "https://" + name + "-gateway-http." + namespace + ".svc:8080/api/logs/v1/network/" +} + +func lokiStackStatusURL(spec *flowslatest.FlowCollectorLoki) string { + name, namespace := getLokiStackNameAndNamespace(spec.LokiStack) + return "https://" + name + "-query-frontend-http." + namespace + ".svc:3100/" } func LokiIngesterURL(spec *flowslatest.FlowCollectorLoki) string { @@ -138,7 +151,7 @@ func LokiQuerierURL(spec *flowslatest.FlowCollectorLoki) string { func LokiStatusURL(spec *flowslatest.FlowCollectorLoki) string { switch spec.Mode { case flowslatest.LokiModeLokiStack: - return "https://" + spec.LokiStack.Name + "-query-frontend-http." + spec.LokiStack.Namespace + ".svc:3100/" + return lokiStackStatusURL(spec) default: if spec.Manual.StatusURL != "" { return spec.Manual.StatusURL @@ -159,11 +172,12 @@ func LokiTenantID(spec *flowslatest.FlowCollectorLoki) string { func LokiTLS(spec *flowslatest.FlowCollectorLoki) *flowslatest.ClientTLS { switch spec.Mode { case flowslatest.LokiModeLokiStack: + name, _ := getLokiStackNameAndNamespace(spec.LokiStack) clientTLS := &flowslatest.ClientTLS{ Enable: true, CACert: flowslatest.CertificateReference{ Type: flowslatest.RefTypeConfigMap, - Name: spec.LokiStack.Name + "-gateway-ca-bundle", + Name: name + "-gateway-ca-bundle", CertFile: "service-ca.crt", }, InsecureSkipVerify: false, @@ -177,17 +191,18 @@ func LokiTLS(spec *flowslatest.FlowCollectorLoki) *flowslatest.ClientTLS { func LokiStatusTLS(spec *flowslatest.FlowCollectorLoki) *flowslatest.ClientTLS { switch spec.Mode { case flowslatest.LokiModeLokiStack: + name, _ := getLokiStackNameAndNamespace(spec.LokiStack) clientTLS := &flowslatest.ClientTLS{ Enable: true, CACert: flowslatest.CertificateReference{ Type: flowslatest.RefTypeConfigMap, - Name: spec.LokiStack.Name + "-ca-bundle", + Name: name + "-ca-bundle", CertFile: "service-ca.crt", }, InsecureSkipVerify: false, UserCert: flowslatest.CertificateReference{ Type: flowslatest.RefTypeSecret, - Name: spec.LokiStack.Name + "-query-frontend-http", + Name: name + "-query-frontend-http", CertFile: "tls.crt", CertKey: "tls.key", }, From f399d9682417406341ad59aad488869d91171d20 Mon Sep 17 00:00:00 2001 From: acmenezes Date: Thu, 7 Sep 2023 16:42:31 -0400 Subject: [PATCH 04/17] tests for v1beta2 MANUAL and LOKISTACK modes Signed-off-by: acmenezes --- api/v1alpha1/flowcollector_webhook.go | 8 +- api/v1alpha1/zz_generated.conversion.go | 15 +- api/v1alpha1/zz_generated.deepcopy.go | 2 +- api/v1beta1/zz_generated.conversion.go | 96 +++--- api/v1beta2/flowcollector_types.go | 110 ++++--- api/v1beta2/zz_generated.deepcopy.go | 51 +-- .../flows.netobserv.io_flowcollectors.yaml | 250 
++++++++++----- controllers/flowcollector_controller_test.go | 51 ++- controllers/flowlogspipeline/flp_test.go | 111 +++++-- docs/FlowCollector.md | 296 ++++++++++++++---- pkg/conditions/conditions.go | 2 +- pkg/helper/monitoring.go | 2 +- pkg/helper/monitoring_test.go | 2 +- 13 files changed, 711 insertions(+), 285 deletions(-) diff --git a/api/v1alpha1/flowcollector_webhook.go b/api/v1alpha1/flowcollector_webhook.go index 80e50e20d..89d7173f9 100644 --- a/api/v1alpha1/flowcollector_webhook.go +++ b/api/v1alpha1/flowcollector_webhook.go @@ -65,7 +65,7 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Loki.Enable = restored.Spec.Loki.Enable if restored.Spec.Agent.EBPF.Features != nil { - dst.Spec.Agent.EBPF.Features = make([]v1beta1.AgentFeature, len(restored.Spec.Agent.EBPF.Features)) + dst.Spec.Agent.EBPF.Features = make([]v1beta2.AgentFeature, len(restored.Spec.Agent.EBPF.Features)) copy(dst.Spec.Agent.EBPF.Features, restored.Spec.Agent.EBPF.Features) } @@ -170,8 +170,8 @@ func Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2 } // // This function need to be manually created because conversion-gen not able to create it intentionally because -// // we have new defined fields in v1beta1 not in v1alpha1 +// // we have new defined fields in v1beta2 not in v1alpha1 // // nolint:golint,stylecheck,revive -func Convert_v1beta1_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta1.ServerTLS, out *ServerTLS, s apiconversion.Scope) error { - return autoConvert_v1beta1_ServerTLS_To_v1alpha1_ServerTLS(in, out, s) +func Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s apiconversion.Scope) error { + return autoConvert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in, out, s) } diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go index 93308e832..ffd08b199 100644 --- a/api/v1alpha1/zz_generated.conversion.go +++ b/api/v1alpha1/zz_generated.conversion.go @@ -268,11 +268,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.ServerTLS)(nil), (*ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ServerTLS_To_v1alpha1_ServerTLS(a.(*v1beta1.ServerTLS), b.(*ServerTLS), scope) - }); err != nil { - return err - } return nil } @@ -664,7 +659,7 @@ func autoConvert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1bet // WARNING: in.ConversationEndTimeout requires manual conversion: does not exist in peer-type // WARNING: in.ConversationTerminatingTimeout requires manual conversion: does not exist in peer-type // WARNING: in.ClusterName requires manual conversion: does not exist in peer-type - if err := Convert_v1beta1_DebugConfig_To_v1alpha1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + if err := Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { return err } return nil @@ -1021,11 +1016,3 @@ func autoConvert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta2.ServerTLS, // WARNING: in.ProvidedCaFile requires manual conversion: does not exist in peer-type return nil } -<<<<<<< HEAD -======= - -// Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS is an autogenerated conversion function. 
-func Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s conversion.Scope) error { - return autoConvert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in, out, s) -} ->>>>>>> 09af8ae (ADD v1beta2 with new Loki integration fields) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index b48955b3c..1cce1fdc9 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ package v1alpha1 import ( "k8s.io/api/autoscaling/v2" "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index 005e9a27c..a7a1bc5de 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -68,16 +68,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ConfigOrSecret)(nil), (*v1beta2.ConfigOrSecret)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ConfigOrSecret_To_v1beta2_ConfigOrSecret(a.(*ConfigOrSecret), b.(*v1beta2.ConfigOrSecret), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ConfigOrSecret)(nil), (*ConfigOrSecret)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ConfigOrSecret_To_v1beta1_ConfigOrSecret(a.(*v1beta2.ConfigOrSecret), b.(*ConfigOrSecret), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*ConsolePluginPortConfig)(nil), (*v1beta2.ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(a.(*ConsolePluginPortConfig), b.(*v1beta2.ConsolePluginPortConfig), scope) }); err != nil { @@ -103,6 +93,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*FileReference)(nil), (*v1beta2.FileReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FileReference_To_v1beta2_FileReference(a.(*FileReference), b.(*v1beta2.FileReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FileReference)(nil), (*FileReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FileReference_To_v1beta1_FileReference(a.(*v1beta2.FileReference), b.(*FileReference), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*FlowCollector)(nil), (*v1beta2.FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_FlowCollector_To_v1beta2_FlowCollector(a.(*FlowCollector), b.(*v1beta2.FlowCollector), scope) }); err != nil { @@ -383,30 +383,6 @@ func Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOpera return autoConvert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(in, out, s) } -func autoConvert_v1beta1_ConfigOrSecret_To_v1beta2_ConfigOrSecret(in *ConfigOrSecret, out *v1beta2.ConfigOrSecret, s conversion.Scope) error { - out.Type = v1beta2.MountableType(in.Type) - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} - -// 
Convert_v1beta1_ConfigOrSecret_To_v1beta2_ConfigOrSecret is an autogenerated conversion function. -func Convert_v1beta1_ConfigOrSecret_To_v1beta2_ConfigOrSecret(in *ConfigOrSecret, out *v1beta2.ConfigOrSecret, s conversion.Scope) error { - return autoConvert_v1beta1_ConfigOrSecret_To_v1beta2_ConfigOrSecret(in, out, s) -} - -func autoConvert_v1beta2_ConfigOrSecret_To_v1beta1_ConfigOrSecret(in *v1beta2.ConfigOrSecret, out *ConfigOrSecret, s conversion.Scope) error { - out.Type = MountableType(in.Type) - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} - -// Convert_v1beta2_ConfigOrSecret_To_v1beta1_ConfigOrSecret is an autogenerated conversion function. -func Convert_v1beta2_ConfigOrSecret_To_v1beta1_ConfigOrSecret(in *v1beta2.ConfigOrSecret, out *ConfigOrSecret, s conversion.Scope) error { - return autoConvert_v1beta2_ConfigOrSecret_To_v1beta1_ConfigOrSecret(in, out, s) -} - func autoConvert_v1beta1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta2.ConsolePluginPortConfig, s conversion.Scope) error { out.Enable = (*bool)(unsafe.Pointer(in.Enable)) out.PortNames = *(*map[string]string)(unsafe.Pointer(&in.PortNames)) @@ -472,6 +448,32 @@ func autoConvert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in *v1beta2.FLPMetrics return nil } +func autoConvert_v1beta1_FileReference_To_v1beta2_FileReference(in *FileReference, out *v1beta2.FileReference, s conversion.Scope) error { + out.Type = v1beta2.MountableType(in.Type) + out.Name = in.Name + out.Namespace = in.Namespace + out.File = in.File + return nil +} + +// Convert_v1beta1_FileReference_To_v1beta2_FileReference is an autogenerated conversion function. +func Convert_v1beta1_FileReference_To_v1beta2_FileReference(in *FileReference, out *v1beta2.FileReference, s conversion.Scope) error { + return autoConvert_v1beta1_FileReference_To_v1beta2_FileReference(in, out, s) +} + +func autoConvert_v1beta2_FileReference_To_v1beta1_FileReference(in *v1beta2.FileReference, out *FileReference, s conversion.Scope) error { + out.Type = MountableType(in.Type) + out.Name = in.Name + out.Namespace = in.Namespace + out.File = in.File + return nil +} + +// Convert_v1beta2_FileReference_To_v1beta1_FileReference is an autogenerated conversion function. 
+func Convert_v1beta2_FileReference_To_v1beta1_FileReference(in *v1beta2.FileReference, out *FileReference, s conversion.Scope) error { + return autoConvert_v1beta2_FileReference_To_v1beta1_FileReference(in, out, s) +} + func autoConvert_v1beta1_FlowCollector_To_v1beta2_FlowCollector(in *FlowCollector, out *v1beta2.FlowCollector, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { @@ -596,8 +598,7 @@ func autoConvert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *Flow if err := Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { return err } - out.EnablePktDrop = (*bool)(unsafe.Pointer(in.EnablePktDrop)) - out.EnableDNSTracking = (*bool)(unsafe.Pointer(in.EnableDNSTracking)) + out.Features = *(*[]v1beta2.AgentFeature)(unsafe.Pointer(&in.Features)) return nil } @@ -620,8 +621,7 @@ func autoConvert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *v1be if err := Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { return err } - out.EnablePktDrop = (*bool)(unsafe.Pointer(in.EnablePktDrop)) - out.EnableDNSTracking = (*bool)(unsafe.Pointer(in.EnableDNSTracking)) + out.Features = *(*[]AgentFeature)(unsafe.Pointer(&in.Features)) return nil } @@ -684,6 +684,7 @@ func autoConvert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCo out.ConversationHeartbeatInterval = (*v1.Duration)(unsafe.Pointer(in.ConversationHeartbeatInterval)) out.ConversationEndTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationEndTimeout)) out.ConversationTerminatingTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationTerminatingTimeout)) + out.ClusterName = in.ClusterName if err := Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { return err } @@ -717,6 +718,7 @@ func autoConvert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *v1beta out.ConversationHeartbeatInterval = (*v1.Duration)(unsafe.Pointer(in.ConversationHeartbeatInterval)) out.ConversationEndTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationEndTimeout)) out.ConversationTerminatingTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationTerminatingTimeout)) + out.ClusterName = in.ClusterName if err := Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { return err } @@ -1065,11 +1067,12 @@ func Convert_v1beta2_QuickFilter_To_v1beta1_QuickFilter(in *v1beta2.QuickFilter, func autoConvert_v1beta1_SASLConfig_To_v1beta2_SASLConfig(in *SASLConfig, out *v1beta2.SASLConfig, s conversion.Scope) error { out.Type = v1beta2.SASLType(in.Type) - if err := Convert_v1beta1_ConfigOrSecret_To_v1beta2_ConfigOrSecret(&in.Reference, &out.Reference, s); err != nil { + if err := Convert_v1beta1_FileReference_To_v1beta2_FileReference(&in.ClientIDReference, &out.ClientIDReference, s); err != nil { + return err + } + if err := Convert_v1beta1_FileReference_To_v1beta2_FileReference(&in.ClientSecretReference, &out.ClientSecretReference, s); err != nil { return err } - out.ClientIDKey = in.ClientIDKey - out.ClientSecretKey = in.ClientSecretKey return nil } @@ -1080,11 +1083,12 @@ func Convert_v1beta1_SASLConfig_To_v1beta2_SASLConfig(in *SASLConfig, out *v1bet func autoConvert_v1beta2_SASLConfig_To_v1beta1_SASLConfig(in *v1beta2.SASLConfig, out *SASLConfig, s conversion.Scope) error { out.Type = SASLType(in.Type) - if err := 
Convert_v1beta2_ConfigOrSecret_To_v1beta1_ConfigOrSecret(&in.Reference, &out.Reference, s); err != nil { + if err := Convert_v1beta2_FileReference_To_v1beta1_FileReference(&in.ClientIDReference, &out.ClientIDReference, s); err != nil { + return err + } + if err := Convert_v1beta2_FileReference_To_v1beta1_FileReference(&in.ClientSecretReference, &out.ClientSecretReference, s); err != nil { return err } - out.ClientIDKey = in.ClientIDKey - out.ClientSecretKey = in.ClientSecretKey return nil } @@ -1096,6 +1100,8 @@ func Convert_v1beta2_SASLConfig_To_v1beta1_SASLConfig(in *v1beta2.SASLConfig, ou func autoConvert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s conversion.Scope) error { out.Type = v1beta2.ServerTLSConfigType(in.Type) out.Provided = (*v1beta2.CertificateReference)(unsafe.Pointer(in.Provided)) + out.InsecureSkipVerify = in.InsecureSkipVerify + out.ProvidedCaFile = (*v1beta2.FileReference)(unsafe.Pointer(in.ProvidedCaFile)) return nil } @@ -1107,6 +1113,8 @@ func Convert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2. func autoConvert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s conversion.Scope) error { out.Type = ServerTLSConfigType(in.Type) out.Provided = (*CertificateReference)(unsafe.Pointer(in.Provided)) + out.InsecureSkipVerify = in.InsecureSkipVerify + out.ProvidedCaFile = (*FileReference)(unsafe.Pointer(in.ProvidedCaFile)) return nil } diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go index 9af30383d..8ca9cc0bc 100644 --- a/api/v1beta2/flowcollector_types.go +++ b/api/v1beta2/flowcollector_types.go @@ -39,7 +39,7 @@ const ( // Defines the desired state of the FlowCollector resource. //

-// *: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature +// *: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature // is not officially supported by Red Hat. It might have been, for instance, contributed by the community // and accepted without a formal agreement for maintenance. The product maintainers might provide some support // for these features as a best effort only. @@ -90,7 +90,7 @@ type FlowCollectorSpec struct { type FlowCollectorAgent struct { // `type` selects the flows tracing agent. Possible values are:
// - `EBPF` (default) to use NetObserv eBPF agent.
- // - `IPFIX` - deprecated (*) - to use the legacy IPFIX collector.
+ // - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
// `EBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. // `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, // but they would require manual configuration). @@ -99,7 +99,7 @@ type FlowCollectorAgent struct { // +kubebuilder:default:=EBPF Type string `json:"type,omitempty"` - // `ipfix` - deprecated (*) - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` + // `ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` // is set to `IPFIX`. // +optional IPFIX FlowCollectorIPFIX `json:"ipfix,omitempty"` @@ -147,6 +147,19 @@ type FlowCollectorIPFIX struct { OVNKubernetes OVNKubernetesConfig `json:"ovnKubernetes,omitempty" mapstructure:"-"` } +// Agent feature, can be one of:
+// - `PacketDrop`, to track packet drops.
+// - `DNSTracking`, to track specific information on DNS traffic.
+// - `FlowRTT`, to track TCP latency. [Unsupported (*)].
+// +kubebuilder:validation:Enum:="PacketDrop";"DNSTracking";"FlowRTT" +type AgentFeature string + +const ( + PacketDrop AgentFeature = "PacketDrop" + DNSTracking AgentFeature = "DNSTracking" + FlowRTT AgentFeature = "FlowRTT" +) + // `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information type FlowCollectorEBPF struct { // Important: Run "make generate" to regenerate code after modifying this file @@ -220,19 +233,16 @@ type FlowCollectorEBPF struct { // +optional Debug DebugConfig `json:"debug,omitempty"` - // Enable the Packets drop flows logging feature. This feature requires mounting + // List of additional features to enable. They are all disabled by default. Enabling additional features may have performance impacts. Possible values are:
+ // - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting // the kernel debug filesystem, so the eBPF pod has to run as privileged. - // If the spec.agent.eBPF.privileged parameter is not set, an error is reported. - //+kubebuilder:default:=false - //+optional - EnablePktDrop *bool `json:"enablePktDrop,omitempty"` - - // Enable the DNS tracking feature. This feature requires mounting + // If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
+ // - `DNSTracking`: enable the DNS tracking feature. This feature requires mounting // the kernel debug filesystem hence the eBPF pod has to run as privileged. - // If the spec.agent.eBPF.privileged parameter is not set, an error is reported. - //+kubebuilder:default:=false - //+optional - EnableDNSTracking *bool `json:"enableDNSTracking,omitempty"` + // If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
+ // - `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature works better with `sampling` set to 1.
+ // +optional + Features []AgentFeature `json:"features,omitempty"` } // `FlowCollectorKafka` defines the desired Kafka config of FlowCollector @@ -251,7 +261,7 @@ type FlowCollectorKafka struct { // +optional TLS ClientTLS `json:"tls"` - // SASL authentication configuration. Unsupported (*) + // SASL authentication configuration. [Unsupported (*)]. // +optional // +k8s:conversion-gen=false SASL SASLConfig `json:"sasl"` @@ -295,6 +305,15 @@ type ServerTLS struct { // TLS configuration when `type` is set to `PROVIDED`. // +optional Provided *CertificateReference `json:"provided"` + + //+kubebuilder:default:=false + // insecureSkipVerify allows skipping client-side verification of the provided certificate. + // If set to true, the ProvidedCaFile field will be ignored. + InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` + + // Reference to the CA file when `type` is set to `PROVIDED` + // +optional + ProvidedCaFile *FileReference `json:"providedCaFile,omitempty"` } // `MetricsServerConfig` define the metrics server endpoint configuration for Prometheus scraper @@ -330,8 +349,9 @@ type FLPMetrics struct { Server MetricsServerConfig `json:"server,omitempty"` // `ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . - // Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`. - //+kubebuilder:default:={"egress","packets"} + // Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, `namespaces-flows`, `workloads-flows`. + // Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` offering a finer granularity). + //+kubebuilder:default:={"egress","packets","nodes-flows","namespaces-flows","workloads-flows","namespaces"} // +optional IgnoreTags []string `json:"ignoreTags"` @@ -448,6 +468,11 @@ type FlowCollectorFLP struct { // `conversationTerminatingTimeout` is the time to wait from detected FIN flag to end a conversation. Only relevant for TCP flows. ConversationTerminatingTimeout *metav1.Duration `json:"conversationTerminatingTimeout,omitempty"` + //+kubebuilder:default:="" + // +optional + // `clusterName` is the name of the cluster to appear in the flows data. This is useful in a multi-cluster context. When using OpenShift, leave empty to make it automatically determined. + ClusterName string `json:"clusterName,omitempty"` + // `debug` allows setting some aspects of the internal configuration of the flow processor. // This section is aimed exclusively for debugging and fine-grained performance optimizations, // such as GOGC and GOMAXPROCS env vars. Users setting its values do it at their own risk. @@ -526,7 +551,7 @@ type LokiManualParams struct { // `authToken` describes the way to get a token to authenticate to Loki.
// - `DISABLED` will not send any token with the request.
// - `FORWARD` will forward the user token for authorization.
- // - `HOST` - deprecated (*) - will use the local pod service account to authenticate to Loki.
+ // - `HOST` [deprecated (*)] - will use the local pod service account to authenticate to Loki.
// When using the Loki Operator, this must be set to `FORWARD`. AuthToken string `json:"authToken,omitempty"` @@ -720,6 +745,24 @@ const ( RefTypeConfigMap MountableType = "configmap" ) +type FileReference struct { + //+kubebuilder:validation:Enum=configmap;secret + // Type for the file reference: "configmap" or "secret" + Type MountableType `json:"type,omitempty"` + + // Name of the config map or secret containing the file + Name string `json:"name,omitempty"` + + // Namespace of the config map or secret containing the file. If omitted, assumes same namespace as where NetObserv is deployed. + // If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. + // +optional + //+kubebuilder:default:="" + Namespace string `json:"namespace,omitempty"` + + // File name within the config map or secret + File string `json:"file,omitempty"` +} + type CertificateReference struct { //+kubebuilder:validation:Enum=configmap;secret // Type for the certificate reference: `configmap` or `secret` @@ -776,29 +819,11 @@ type SASLConfig struct { // Type of SASL authentication to use, or `DISABLED` if SASL is not used Type SASLType `json:"type,omitempty"` - // Reference to the secret or config map containing the client ID and secret - Reference ConfigOrSecret `json:"reference,omitempty"` - - // Key for client ID within the provided `reference` - ClientIDKey string `json:"clientIDKey,omitempty"` + // Reference to the secret or config map containing the client ID + ClientIDReference FileReference `json:"clientIDReference,omitempty"` - // Key for client secret within the provided `reference` - ClientSecretKey string `json:"clientSecretKey,omitempty"` -} - -type ConfigOrSecret struct { - //+kubebuilder:validation:Enum=configmap;secret - // Type for the reference: "configmap" or "secret" - Type MountableType `json:"type,omitempty"` - - // Name of the config map or secret to reference - Name string `json:"name,omitempty"` - - // Namespace of the config map or secret. If omitted, assumes same namespace as where NetObserv is deployed. - // If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - // +optional - //+kubebuilder:default:="" - Namespace string `json:"namespace,omitempty"` + // Reference to the secret or config map containing the client secret + ClientSecretReference FileReference `json:"clientSecretReference,omitempty"` } // `DebugConfig` allows tweaking some aspects of the internal configuration of the agent and FLP. @@ -822,7 +847,7 @@ const ( // `FlowCollectorExporter` defines an additional exporter to send enriched flows to. type FlowCollectorExporter struct { - // `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported (*). + // `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported (*). // +unionDiscriminator // +kubebuilder:validation:Enum:="KAFKA";"IPFIX" // +kubebuilder:validation:Required @@ -832,7 +857,7 @@ type FlowCollectorExporter struct { // +optional Kafka FlowCollectorKafka `json:"kafka,omitempty"` - // IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. Unsupported (*). + // IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. [Unsupported (*)]. 
// +optional IPFIX FlowCollectorIPFIXReceiver `json:"ipfix,omitempty"` } @@ -856,6 +881,7 @@ type FlowCollectorStatus struct { // +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel` // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[*].reason" // +kubebuilder:storageversion + // `FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments. type FlowCollector struct { metav1.TypeMeta `json:",inline"` diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index a72359f6b..1719c4930 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -74,21 +74,6 @@ func (in *ClusterNetworkOperatorConfig) DeepCopy() *ClusterNetworkOperatorConfig return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigOrSecret) DeepCopyInto(out *ConfigOrSecret) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigOrSecret. -func (in *ConfigOrSecret) DeepCopy() *ConfigOrSecret { - if in == nil { - return nil - } - out := new(ConfigOrSecret) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConsolePluginPortConfig) DeepCopyInto(out *ConsolePluginPortConfig) { *out = *in @@ -164,6 +149,21 @@ func (in *FLPMetrics) DeepCopy() *FLPMetrics { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileReference) DeepCopyInto(out *FileReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileReference. +func (in *FileReference) DeepCopy() *FileReference { + if in == nil { + return nil + } + out := new(FileReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FlowCollector) DeepCopyInto(out *FlowCollector) { *out = *in @@ -268,15 +268,10 @@ func (in *FlowCollectorEBPF) DeepCopyInto(out *FlowCollectorEBPF) { copy(*out, *in) } in.Debug.DeepCopyInto(&out.Debug) - if in.EnablePktDrop != nil { - in, out := &in.EnablePktDrop, &out.EnablePktDrop - *out = new(bool) - **out = **in - } - if in.EnableDNSTracking != nil { - in, out := &in.EnableDNSTracking, &out.EnableDNSTracking - *out = new(bool) - **out = **in + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = make([]AgentFeature, len(*in)) + copy(*out, *in) } } @@ -668,7 +663,8 @@ func (in *QuickFilter) DeepCopy() *QuickFilter { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SASLConfig) DeepCopyInto(out *SASLConfig) { *out = *in - out.Reference = in.Reference + out.ClientIDReference = in.ClientIDReference + out.ClientSecretReference = in.ClientSecretReference } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SASLConfig. 
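As a reading aid, here is a minimal sketch of how the new v1beta2 fields defined above might be combined in a FlowCollector resource. The field paths follow the JSON tags in this patch; the feature list, cluster name, Kafka address, and secret names are illustrative only, not defaults:

    apiVersion: flows.netobserv.io/v1beta2
    kind: FlowCollector
    metadata:
      name: cluster
    spec:
      agent:
        type: EBPF
        ebpf:
          privileged: true  # required by the PacketDrop and DNSTracking features
          features:
            - PacketDrop
            - DNSTracking
      processor:
        clusterName: staging-east  # illustrative; leave empty on OpenShift for auto-detection
      exporters:
        - type: KAFKA
          kafka:
            address: kafka-cluster-kafka-bootstrap.netobserv:9092  # hypothetical broker address
            topic: network-flows-export
            sasl:
              type: PLAIN
              clientIDReference:
                type: secret
                name: kafka-sasl  # hypothetical secret
                file: client-id
              clientSecretReference:
                type: secret
                name: kafka-sasl
                file: client-secret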
@@ -689,6 +685,11 @@ func (in *ServerTLS) DeepCopyInto(out *ServerTLS) { *out = new(CertificateReference) **out = **in } + if in.ProvidedCaFile != nil { + in, out := &in.ProvidedCaFile, &out.ProvidedCaFile + *out = new(FileReference) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerTLS. diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index 0e8c84ff1..862d764bf 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -4940,12 +4940,12 @@ spec: type: object spec: description: 'Defines the desired state of the FlowCollector resource. -

*: the mention of "unsupported", or "deprecated" - for a feature throughout this document means that this feature is not - officially supported by Red Hat. It might have been, for instance, contributed - by the community and accepted without a formal agreement for maintenance. - The product maintainers might provide some support for these features - as a best effort only.' +

*: the mention of "unsupported", or "deprecated" for a feature + throughout this document means that this feature is not officially supported + by Red Hat. It might have been, for instance, contributed by the community + and accepted without a formal agreement for maintenance. The product + maintainers might provide some support for these features as a best + effort only.' properties: agent: description: Agent configuration for flows extraction. @@ -4993,20 +4993,6 @@ spec: they are only useful in edge debug or support scenarios.' type: object type: object - enableDNSTracking: - default: false - description: Enable the DNS tracking feature. This feature - requires mounting the kernel debug filesystem hence the - eBPF pod has to run as privileged. If the spec.agent.eBPF.privileged - parameter is not set, an error is reported. - type: boolean - enablePktDrop: - default: false - description: Enable the Packets drop flows logging feature. - This feature requires mounting the kernel debug filesystem, - so the eBPF pod has to run as privileged. If the spec.agent.eBPF.privileged - parameter is not set, an error is reported. - type: boolean excludeInterfaces: default: - lo @@ -5017,6 +5003,32 @@ spec: items: type: string type: array + features: + description: 'List of additional features to enable. They + are all disabled by default. Enabling additional features + may have performance impacts. Possible values are:
- + `PacketDrop`: enable the packets drop flows logging feature. + This feature requires mounting the kernel debug filesystem, + so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` + parameter is not set, an error is reported.
- `DNSTracking`: + enable the DNS tracking feature. This feature requires mounting + the kernel debug filesystem hence the eBPF pod has to run + as privileged. If the `spec.agent.eBPF.privileged` parameter + is not set, an error is reported.
- `FlowRTT` [unsupported + (*)]: enable flow latency (RTT) calculations in the eBPF + agent during TCP handshakes. This feature works better with + `sampling` set to 1.
' + items: + description: Agent feature, can be one of:
- `PacketDrop`, + to track packet drops.
- `DNSTracking`, to track specific + information on DNS traffic.
- `FlowRTT`, to track + TCP latency. [Unsupported (*)].
+ enum: + - PacketDrop + - DNSTracking + - FlowRTT + type: string + type: array imagePullPolicy: default: IfNotPresent description: '`imagePullPolicy` is the Kubernetes pull policy @@ -5108,8 +5120,8 @@ spec: type: integer type: object ipfix: - description: '`ipfix` - deprecated (*) - describes the - settings related to the IPFIX-based flow reporter when `spec.agent.type` + description: '`ipfix` [deprecated (*)] - describes the settings + related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.' properties: cacheActiveTimeout: @@ -5183,12 +5195,11 @@ spec: default: EBPF description: '`type` selects the flows tracing agent. Possible values are:
- `EBPF` (default) to use NetObserv eBPF agent.
- - `IPFIX` - deprecated (*) - to use the legacy IPFIX - collector.
`EBPF` is recommended as it offers better performances - and should work regardless of the CNI installed on the cluster. - `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work - if they support exporting IPFIX, but they would require manual - configuration).' + - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
+ `EBPF` is recommended as it offers better performances and should + work regardless of the CNI installed on the cluster. `IPFIX` + works with OVN-Kubernetes CNI (other CNIs could work if they + support exporting IPFIX, but they would require manual configuration).' enum: - EBPF - IPFIX @@ -5895,7 +5906,7 @@ spec: properties: ipfix: description: IPFIX configuration, such as the IP address and - port to send enriched IPFIX flows to. Unsupported (*). + port to send enriched IPFIX flows to. [Unsupported (*)]. properties: targetHost: default: "" @@ -5924,34 +5935,61 @@ spec: description: Address of the Kafka server type: string sasl: - description: SASL authentication configuration. Unsupported - (*) + description: SASL authentication configuration. [Unsupported + (*)]. properties: - clientIDKey: - description: Key for client ID within the provided `reference` - type: string - clientSecretKey: - description: Key for client secret within the provided - `reference` - type: string - reference: + clientIDReference: description: Reference to the secret or config map containing - the client ID and secret + the client ID properties: + file: + description: File name within the config map or + secret + type: string name: - description: Name of the config map or secret to - reference + description: Name of the config map or secret containing + the file type: string namespace: default: "" - description: Namespace of the config map or secret. - If omitted, assumes same namespace as where NetObserv - is deployed. If the namespace is different, the - config map or the secret will be copied so that - it can be mounted as required. + description: Namespace of the config map or secret + containing the file. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string + type: + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + clientSecretReference: + description: Reference to the secret or config map containing + the client secret + properties: + file: + description: File name within the config map or + secret + type: string + name: + description: Name of the config map or secret containing + the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing the file. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. type: string type: - description: 'Type for the reference: "configmap" + description: 'Type for the file reference: "configmap" or "secret"' enum: - configmap @@ -6066,8 +6104,7 @@ spec: type: object type: description: '`type` selects the type of exporters. The available - options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported - (*).' + options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported (*).' enum: - KAFKA - IPFIX @@ -6086,32 +6123,57 @@ spec: description: Address of the Kafka server type: string sasl: - description: SASL authentication configuration. Unsupported - (*) + description: SASL authentication configuration. [Unsupported (*)]. 
properties: - clientIDKey: - description: Key for client ID within the provided `reference` - type: string - clientSecretKey: - description: Key for client secret within the provided `reference` - type: string - reference: + clientIDReference: description: Reference to the secret or config map containing - the client ID and secret + the client ID properties: + file: + description: File name within the config map or secret + type: string name: - description: Name of the config map or secret to reference + description: Name of the config map or secret containing + the file type: string namespace: default: "" - description: Namespace of the config map or secret. If - omitted, assumes same namespace as where NetObserv is - deployed. If the namespace is different, the config - map or the secret will be copied so that it can be mounted - as required. + description: Namespace of the config map or secret containing + the file. If omitted, assumes same namespace as where + NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string + type: + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + clientSecretReference: + description: Reference to the secret or config map containing + the client secret + properties: + file: + description: File name within the config map or secret + type: string + name: + description: Name of the config map or secret containing + the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + the file. If omitted, assumes same namespace as where + NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. type: string type: - description: 'Type for the reference: "configmap" or "secret"' + description: 'Type for the file reference: "configmap" + or "secret"' enum: - configmap - secret @@ -6260,8 +6322,8 @@ spec: description: '`authToken` describes the way to get a token to authenticate to Loki.
- `DISABLED` will not send any token with the request.
- `FORWARD` will forward - the user token for authorization.
- `HOST` - deprecated - (*) - will use the local pod service account to authenticate + the user token for authorization.
- `HOST` [deprecated + (*)] - will use the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set to `FORWARD`.' enum: - DISABLED - HOST - FORWARD @@ -6516,6 +6578,12 @@ spec: and forwards them to the Loki persistence layer and/or any available exporter.' properties: + clusterName: + default: "" + description: '`clusterName` is the name of the cluster to appear + in the flows data. This is useful in a multi-cluster context. + When using OpenShift, leave empty to make it automatically determined.' + type: string conversationEndTimeout: default: 10s description: '`conversationEndTimeout` is the time to wait after @@ -7182,11 +7250,19 @@ spec: default: - egress - packets + - nodes-flows + - namespaces-flows + - workloads-flows + - namespaces description: '`ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, - `packets`, `namespaces`, `nodes`, `workloads`.' + `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, + `namespaces-flows`, `workloads-flows`. Namespace-based metrics + are covered by both `workloads` and `namespaces` tags, hence + it is recommended to always ignore one of them (`workloads` + offering a finer granularity).' items: type: string type: array @@ -7204,6 +7280,12 @@ spec: tls: description: TLS configuration. properties: + insecureSkipVerify: + default: false + description: '`insecureSkipVerify` allows skipping client-side + verification of the provided certificate. If set + to `true`, the `providedCaFile` field is ignored.' + type: boolean provided: description: TLS configuration when `type` is set to `PROVIDED`. @@ -7240,6 +7322,34 @@ spec: - secret type: string type: object + providedCaFile: + description: Reference to the CA file; it will be ignored + if `insecureSkipVerify` is set to `true`. + properties: + file: + description: File name within the config map or + secret + type: string + name: + description: Name of the config map or secret + containing the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing the file. If omitted, assumes same + namespace as where NetObserv is deployed. If + the namespace is different, the config map or + the secret will be copied so that it can be + mounted as required. + type: string + type: + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object type: default: DISABLED description: Select the type of TLS configuration:
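To illustrate the generated schema above, a hypothetical processor fragment using the new metrics TLS fields could look as follows. `provided` keeps the existing `CertificateReference` shape, while `providedCaFile` uses the new `FileReference`; per the descriptions above, it is only honored when `insecureSkipVerify` is `false`. All object names here are made up:

    spec:
      processor:
        metrics:
          server:
            tls:
              type: PROVIDED
              insecureSkipVerify: false
              provided:
                type: secret
                name: flp-metrics-cert  # hypothetical secret holding the serving certificate
                certFile: tls.crt
                certKey: tls.key
              providedCaFile:
                type: configmap
                name: flp-metrics-ca  # hypothetical config map holding the CA
                file: ca.crt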
diff --git a/controllers/flowcollector_controller_test.go b/controllers/flowcollector_controller_test.go index 01dfd0cb2..3a6dd3945 100644 --- a/controllers/flowcollector_controller_test.go +++ b/controllers/flowcollector_controller_test.go @@ -632,7 +632,7 @@ func flowCollectorControllerSpecs() { }) }) - Context("Using certificates", func() { + Context("Using certificates with loki manual mode", func() { flpDS := appsv1.DaemonSet{} It("Should update Loki to use TLS", func() { // Create CM certificate @@ -682,6 +682,55 @@ func flowCollectorControllerSpecs() { }) }) + Context("Using Certificates With Loki in LokiStack Mode", func() { + flpDS := appsv1.DaemonSet{} + It("Should update Loki config successfully", func() { + // Create CM certificate + Expect(k8sClient.Create(ctx, &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "lokistack-gateway-ca-bundle", + Namespace: operatorNamespace, + }, + Data: map[string]string{"service-ca.crt": "certificate data"}, + })).Should(Succeed()) + UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { + fc.Spec.Loki.Mode = "LOKISTACK" + fc.Spec.Loki.LokiStack = &flowslatest.LokiStack{ + Name: "lokistack", + Namespace: operatorNamespace, + } + }) + }) + It("Should have certificate mounted", func() { + By("Expecting certificate mounted") + Eventually(func() interface{} { + if err := k8sClient.Get(ctx, flpKey1, &flpDS); err != nil { + return err + } + return flpDS.Spec.Template.Spec.Volumes + }, timeout, interval).Should(HaveLen(3)) + Expect(flpDS.Spec.Template.Spec.Volumes[0].Name).To(Equal("config-volume")) + Expect(flpDS.Spec.Template.Spec.Volumes[1].Name).To(Equal("flowlogs-pipeline")) + Expect(flpDS.Spec.Template.Spec.Volumes[2].Name).To(Equal("loki-certs-ca")) + }) + + It("Should restore no TLS config in manual mode", func() { + UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { + fc.Spec.Loki.Mode = "MANUAL" + fc.Spec.Loki.Manual.TLS = flowslatest.ClientTLS{ + Enable: false, + } + }) + Eventually(func() interface{} { + if err := k8sClient.Get(ctx, flpKey1, &flpDS); err != nil { + return err + } + return flpDS.Spec.Template.Spec.Volumes + }, timeout, interval).Should(HaveLen(1)) + Expect(flpDS.Spec.Template.Spec.Volumes[0].Name).To(Equal("config-volume")) + }) + }) + Context("Changing namespace", func() { It("Should update namespace successfully", func() { UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { diff --git a/controllers/flowlogspipeline/flp_test.go b/controllers/flowlogspipeline/flp_test.go index 43556391d..f771de096 100644 --- a/controllers/flowlogspipeline/flp_test.go +++ b/controllers/flowlogspipeline/flp_test.go @@ -53,7 +53,8 @@ var outputRecordTypes = flowslatest.LogTypeAll const testNamespace = "flp" -func getConfig() flowslatest.FlowCollectorSpec { +func getConfig(lokiMode ...string) flowslatest.FlowCollectorSpec { + return flowslatest.FlowCollectorSpec{ DeploymentModel: flowslatest.DeploymentModelDirect, Agent: flowslatest.FlowCollectorAgent{Type: flowslatest.AgentIPFIX}, @@ -98,22 +99,7 @@ func getConfig() flowslatest.FlowCollectorSpec { Duration: conntrackTerminatingTimeout, }, }, - Loki: flowslatest.FlowCollectorLoki{Manual: flowslatest.LokiManualParams{ - IngesterURL: "http://loki:3100/"}, - BatchWait: &metav1.Duration{ - Duration: 1, - }, - BatchSize: 102400, - MinBackoff: &metav1.Duration{ - Duration: 1, - }, - MaxBackoff: &metav1.Duration{ - Duration: 300, - }, - Enable: pointer.Bool(true), - MaxRetries: pointer.Int32(10), - StaticLabels: map[string]string{"app": "netobserv-flowcollector"}, - }, + Loki: 
getLoki(lokiMode...), Kafka: flowslatest.FlowCollectorKafka{ Address: "kafka", Topic: "flp", @@ -121,6 +107,49 @@ } } +func getLoki(lokiMode ...string) flowslatest.FlowCollectorLoki { + + if lokiMode != nil { + if lokiMode[0] == "LOKISTACK" { + return flowslatest.FlowCollectorLoki{Mode: "LOKISTACK", LokiStack: &flowslatest.LokiStack{ + Name: "lokistack", + Namespace: "ls-namespace", + }, + BatchWait: &metav1.Duration{ + Duration: 1, + }, + BatchSize: 102400, + MinBackoff: &metav1.Duration{ + Duration: 1, + }, + MaxBackoff: &metav1.Duration{ + Duration: 300, + }, + Enable: pointer.Bool(true), + MaxRetries: pointer.Int32(10), + StaticLabels: map[string]string{"app": "netobserv-flowcollector"}, + } + } + } + // defaults to MANUAL mode if no other mode was selected + return flowslatest.FlowCollectorLoki{Mode: "MANUAL", Manual: flowslatest.LokiManualParams{ + IngesterURL: "http://loki:3100/"}, + BatchWait: &metav1.Duration{ + Duration: 1, + }, + BatchSize: 102400, + MinBackoff: &metav1.Duration{ + Duration: 1, + }, + MaxBackoff: &metav1.Duration{ + Duration: 300, + }, + Enable: pointer.Bool(true), + MaxRetries: pointer.Int32(10), + StaticLabels: map[string]string{"app": "netobserv-flowcollector"}, + } +} + func getConfigNoHPA() flowslatest.FlowCollectorSpec { cfg := getConfig() cfg.Processor.KafkaConsumerAutoscaler.Status = flowslatest.HPAStatusDisabled @@ -597,7 +626,7 @@ func TestPrometheusRuleChanged(t *testing.T) { assert.Contains(report.String(), "PrometheusRule labels changed") } -func TestConfigMapShouldDeserializeAsJSON(t *testing.T) { +func TestConfigMapShouldDeserializeAsJSONWithLokiManual(t *testing.T) { assert := assert.New(t) ns := "namespace" @@ -646,6 +675,52 @@ assert.Equal(cfg.Processor.Metrics.Server.Port, int32(decoded.MetricsSettings.Port)) } +func TestConfigMapShouldDeserializeAsJSONWithLokiStack(t *testing.T) { + assert := assert.New(t) + + ns := "namespace" + cfg := getConfig("LOKISTACK") + loki := cfg.Loki + b := monoBuilder(ns, &cfg) + cm, digest, err := b.configMap() + assert.NoError(err) + assert.NotEmpty(digest) + + assert.Equal("dev", cm.Labels["version"]) + + data, ok := cm.Data[configFile] + assert.True(ok) + + var decoded config.ConfigFileStruct + err = json.Unmarshal([]byte(data), &decoded) + + assert.Nil(err) + assert.Equal("trace", decoded.LogLevel) + + params := decoded.Parameters + assert.Len(params, 6) + assert.Equal(cfg.Processor.Port, int32(params[0].Ingest.Collector.Port)) + + lokiCfg := params[3].Write.Loki + assert.Equal("https://lokistack-gateway-http.ls-namespace.svc:8080/api/logs/v1/network/", lokiCfg.URL) + assert.Equal("network", lokiCfg.TenantID) + assert.Equal("Bearer", lokiCfg.ClientConfig.Authorization.Type) + assert.Equal("/var/run/secrets/tokens/flowlogs-pipeline", lokiCfg.ClientConfig.Authorization.CredentialsFile) + assert.Equal(false, lokiCfg.ClientConfig.TLSConfig.InsecureSkipVerify) + assert.Equal("/var/loki-certs-ca/service-ca.crt", lokiCfg.ClientConfig.TLSConfig.CAFile) + assert.Equal("", lokiCfg.ClientConfig.TLSConfig.CertFile) + assert.Equal("", lokiCfg.ClientConfig.TLSConfig.KeyFile) + assert.Equal(loki.BatchWait.Duration.String(), lokiCfg.BatchWait) + assert.Equal(loki.MinBackoff.Duration.String(), lokiCfg.MinBackoff) + assert.Equal(loki.MaxBackoff.Duration.String(), lokiCfg.MaxBackoff) + assert.EqualValues(*loki.MaxRetries, lokiCfg.MaxRetries) + assert.EqualValues(loki.BatchSize, lokiCfg.BatchSize) +
assert.EqualValues([]string{"SrcK8S_Namespace", "SrcK8S_OwnerName", "DstK8S_Namespace", "DstK8S_OwnerName", "FlowDirection", "_RecordType"}, lokiCfg.Labels) + assert.Equal(`{app="netobserv-flowcollector"}`, fmt.Sprintf("%v", lokiCfg.StaticLabels)) + + assert.Equal(cfg.Processor.Metrics.Server.Port, int32(decoded.MetricsSettings.Port)) +} + func TestAutoScalerUpdateCheck(t *testing.T) { assert := assert.New(t) diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index 251300cfe..a110c14d0 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -8626,7 +8626,7 @@ Resource Types:
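For reference, the LOKISTACK mode exercised in the tests above would be expressed in a FlowCollector roughly as follows; this is a sketch assuming `mode` and `lokiStack` JSON tags for the Go fields shown earlier, with the name and namespace taken from the test fixtures. The processor is then expected to derive the gateway URL https://<name>-gateway-http.<namespace>.svc:8080/api/logs/v1/network/ and authenticate with its service-account bearer token:

    spec:
      loki:
        enable: true
        mode: LOKISTACK  # instead of the default MANUAL
        lokiStack:
          name: lokistack
          namespace: ls-namespace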
@@ -8645,7 +8645,7 @@ Resource Types: -Defines the desired state of the FlowCollector resource.

*: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature is not officially supported by Red Hat. It might have been, for instance, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers might provide some support for these features as a best effort only. +Defines the desired state of the FlowCollector resource.

*: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature is not officially supported by Red Hat. It might have been, for instance, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers might provide some support for these features as a best effort only.
spec object - Defines the desired state of the FlowCollector resource.

*: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature is not officially supported by Red Hat. It might have been, for instance, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers might provide some support for these features as a best effort only.
+ Defines the desired state of the FlowCollector resource.

*: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature is not officially supported by Red Hat. It might have been, for instance, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers might provide some support for these features as a best effort only.
false
@@ -8748,14 +8748,14 @@ Agent configuration for flows extraction. - - - - - - - - - - @@ -8835,6 +8817,13 @@ Agent configuration for flows extraction. Default: [lo]
+ + + + + @@ -8968,7 +8957,7 @@ Agent configuration for flows extraction. -`ipfix` - deprecated (*) - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`. +`ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.
ipfix object - `ipfix` - deprecated (*) - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.
+ `ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.
false
type enum - `type` selects the flows tracing agent. Possible values are:
- `EBPF` (default) to use NetObserv eBPF agent.
- `IPFIX` - deprecated (*) - to use the legacy IPFIX collector.
`EBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).
+ `type` selects the flows tracing agent. Possible values are:
- `EBPF` (default) to use NetObserv eBPF agent.
- `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
`EBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).

Enum: EBPF, IPFIX
Default: EBPF
@@ -8808,24 +8808,6 @@ Agent configuration for flows extraction. `debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as GOGC and GOMAXPROCS env vars. Users setting its values do it at their own risk.
false
enableDNSTrackingboolean - Enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run as privileged. If the spec.agent.eBPF.privileged parameter is not set, an error is reported.
-
- Default: false
-
false
enablePktDropboolean - Enable the Packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the spec.agent.eBPF.privileged parameter is not set, an error is reported.
-
- Default: false
-
false
excludeInterfaces []string false
features[]enum + List of additional features to enable. They are all disabled by default. Enabling additional features may have performance impacts. Possible values are:
- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
- `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature works better with `sampling` set to 1.

+
false
imagePullPolicy enum
@@ -10279,7 +10268,7 @@ target specifies the target value for the given metric @@ -10288,7 +10277,7 @@ target specifies the target value for the given metric @@ -10307,7 +10296,7 @@ target specifies the target value for the given metric -IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. Unsupported (*). +IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. [Unsupported (*)].
type enum - `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported (*).
+ `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported (*).

Enum: KAFKA, IPFIX
ipfix object - IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. Unsupported (*).
+ IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. [Unsupported (*)].
false
@@ -10385,7 +10374,7 @@ Kafka configuration, such as the address and topic, to send enriched flows to. @@ -10404,7 +10393,51 @@ Kafka configuration, such as the address and topic, to send enriched flows to. -SASL authentication configuration. Unsupported (*) +SASL authentication configuration. [Unsupported (*)]. + +
sasl object - SASL authentication configuration. Unsupported (*)
+ SASL authentication configuration. [Unsupported (*)].
false
+ + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
clientIDReferenceobject + Reference to the secret or config map containing the client ID
+
false
clientSecretReferenceobject + Reference to the secret or config map containing the client secret
+
false
typeenum + Type of SASL authentication to use, or `DISABLED` if SASL is not used
+
+ Enum: DISABLED, PLAIN, SCRAM-SHA512
+ Default: DISABLED
+
false
+ + +### FlowCollector.spec.exporters[index].kafka.sasl.clientIDReference +[↩ Parent](#flowcollectorspecexportersindexkafkasasl-1) + + + +Reference to the secret or config map containing the client ID @@ -10416,46 +10449,47 @@ SASL authentication configuration. Unsupported (*) - + - + - - + +
clientIDKeyfile string - Key for client ID within the provided `reference`
+ File name within the config map or secret
false
clientSecretKeyname string - Key for client secret within the provided `reference`
+ Name of the config map or secret containing the file
false
referenceobjectnamespacestring - Reference to the secret or config map containing the client ID and secret
+ Namespace of the config map or secret containing the file. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
false
type enum - Type of SASL authentication to use, or `DISABLED` if SASL is not used
+ Type for the file reference: "configmap" or "secret"

- Enum: DISABLED, PLAIN, SCRAM-SHA512
- Default: DISABLED
+ Enum: configmap, secret
false
-### FlowCollector.spec.exporters[index].kafka.sasl.reference +### FlowCollector.spec.exporters[index].kafka.sasl.clientSecretReference [↩ Parent](#flowcollectorspecexportersindexkafkasasl-1) -Reference to the secret or config map containing the client ID and secret +Reference to the secret or config map containing the client secret @@ -10467,17 +10501,24 @@ Reference to the secret or config map containing the client ID and secret + + + + + @@ -10486,7 +10527,7 @@ Reference to the secret or config map containing the client ID and secret @@ -10703,7 +10744,7 @@ Kafka configuration, allowing to use Kafka as a broker as part of the flow colle @@ -10722,7 +10763,7 @@ Kafka configuration, allowing to use Kafka as a broker as part of the flow colle -SASL authentication configuration. Unsupported (*) +SASL authentication configuration. [Unsupported (*)].
filestring + File name within the config map or secret
+
false
name string - Name of the config map or secret to reference
+ Name of the config map or secret containing the file
false
namespace string - Namespace of the config map or secret. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ Namespace of the config map or secret containing the file. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.

Default:
type enum - Type for the reference: "configmap" or "secret"
+ Type for the file reference: "configmap" or "secret"

Enum: configmap, secret
sasl object - SASL authentication configuration. Unsupported (*)
+ SASL authentication configuration. [Unsupported (*)].
false
@@ -10734,46 +10775,91 @@ SASL authentication configuration. Unsupported (*) - + + + + + + + + + + + + + + + +
clientIDKeyclientIDReferenceobject + Reference to the secret or config map containing the client ID
+
false
clientSecretReferenceobject + Reference to the secret or config map containing the client secret
+
false
typeenum + Type of SASL authentication to use, or `DISABLED` if SASL is not used
+
+ Enum: DISABLED, PLAIN, SCRAM-SHA512
+ Default: DISABLED
+
false
+ + +### FlowCollector.spec.kafka.sasl.clientIDReference +[↩ Parent](#flowcollectorspeckafkasasl-1) + + + +Reference to the secret or config map containing the client ID + + + + + + + + + + + + - + - - + +
NameTypeDescriptionRequired
file string - Key for client ID within the provided `reference`
+ File name within the config map or secret
false
clientSecretKeyname string - Key for client secret within the provided `reference`
+ Name of the config map or secret containing the file
false
referenceobjectnamespacestring - Reference to the secret or config map containing the client ID and secret
+ Namespace of the config map or secret containing the file. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
false
type enum - Type of SASL authentication to use, or `DISABLED` if SASL is not used
+ Type for the file reference: "configmap" or "secret"

- Enum: DISABLED, PLAIN, SCRAM-SHA512
- Default: DISABLED
+ Enum: configmap, secret
false
-### FlowCollector.spec.kafka.sasl.reference +### FlowCollector.spec.kafka.sasl.clientSecretReference [↩ Parent](#flowcollectorspeckafkasasl-1) -Reference to the secret or config map containing the client ID and secret +Reference to the secret or config map containing the client secret @@ -10785,17 +10871,24 @@ Reference to the secret or config map containing the client ID and secret + + + + + @@ -10804,7 +10897,7 @@ Reference to the secret or config map containing the client ID and secret @@ -11161,7 +11254,7 @@ Loki configuration for MANUAL mode. This is the more flexible configuration. It + + + + + @@ -12792,12 +12894,28 @@ TLS configuration. + + + + + + + + + + @@ -12871,6 +12989,58 @@ TLS configuration when `type` is set to `PROVIDED`.
filestring + File name within the config map or secret
+
false
name string - Name of the config map or secret to reference
+ Name of the config map or secret containing the file
false
namespace string - Namespace of the config map or secret. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ Namespace of the config map or secret containing the file. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.

Default:
type enum - Type for the reference: "configmap" or "secret"
+ Type for the file reference: "configmap" or "secret"

Enum: configmap, secret
authToken enum - `authToken` describes the way to get a token to authenticate to Loki.
- `DISABLED` will not send any token with the request.
- `FORWARD` will forward the user token for authorization.
- `HOST` - deprecated (*) - will use the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set to `FORWARD`.
+ `authToken` describes the way to get a token to authenticate to Loki.
- `DISABLED` will not send any token with the request.
- `FORWARD` will forward the user token for authorization.
- `HOST` [deprecated (*)] - will use the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set to `FORWARD`.

Enum: DISABLED, HOST, FORWARD
Default: DISABLED
@@ -11574,6 +11667,15 @@ TLS client configuration for Loki URL.
clusterNamestring + `clusterName` is the name of the cluster to appear in the flows data. This is useful in a multi-cluster context. When using OpenShift, leave empty to make it automatically determined.
+
+ Default:
+
false
conversationEndTimeout string @@ -12720,9 +12822,9 @@ target specifies the target value for the given metric ignoreTags []string - `ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`.
+ `ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, `namespaces-flows`, `workloads-flows`. Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` offering a finer granularity).

- Default: [egress packets]
+ Default: [egress packets nodes-flows namespaces-flows workloads-flows namespaces]
false
insecureSkipVerifyboolean + `insecureSkipVerify` allows skipping client-side verification of the provided certificate. If set to `true`, the `providedCaFile` field is ignored
+
+ Default: false
+
false
provided object TLS configuration when `type` is set to `PROVIDED`.
false
providedCaFileobject + Reference to the CA file; it will be ignored if `insecureSkipVerify` is set to `true`
+
false
type enum
+### FlowCollector.spec.processor.metrics.server.tls.providedCaFile +[↩ Parent](#flowcollectorspecprocessormetricsservertls-1) + + + +Reference to the CA file will be ignored + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
filestring + File name within the config map or secret
+
false
namestring + Name of the config map or secret containing the file
+
false
namespacestring + Namespace of the config map or secret containing the file. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+
false
typeenum + Type for the file reference: "configmap" or "secret"
+
+ Enum: configmap, secret
+
false
+ + ### FlowCollector.spec.processor.resources [↩ Parent](#flowcollectorspecprocessor-1) diff --git a/pkg/conditions/conditions.go b/pkg/conditions/conditions.go index c675bda85..7b4fc73c8 100644 --- a/pkg/conditions/conditions.go +++ b/pkg/conditions/conditions.go @@ -3,7 +3,7 @@ package conditions import ( "sort" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) diff --git a/pkg/helper/monitoring.go b/pkg/helper/monitoring.go index 23c08d893..24c59c0de 100644 --- a/pkg/helper/monitoring.go +++ b/pkg/helper/monitoring.go @@ -1,7 +1,7 @@ package helper import ( - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" corev1 "k8s.io/api/core/v1" ) diff --git a/pkg/helper/monitoring_test.go b/pkg/helper/monitoring_test.go index fe3a585f8..ca3a3f0f5 100644 --- a/pkg/helper/monitoring_test.go +++ b/pkg/helper/monitoring_test.go @@ -3,7 +3,7 @@ package helper import ( "testing" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/stretchr/testify/assert" ) From 9f52e81b6f5ddc0ae528e58a57a69c345648eadc Mon Sep 17 00:00:00 2001 From: Julien Pinsonneau Date: Thu, 21 Sep 2023 10:13:29 +0200 Subject: [PATCH 05/17] cp test + conversions + update bundle --- Makefile | 10 +- api/v1alpha1/zz_generated.conversion.go | 10 +- api/v1alpha1/zz_generated.deepcopy.go | 2 +- .../flows.netobserv.io_flowcollectors.yaml | 361 +++++++++++++----- .../flows.netobserv.io_flowcollectors.yaml | 111 +++++- .../consoleplugin/consoleplugin_test.go | 36 ++ docs/FlowCollector.md | 131 ++++++- pkg/volumes/builder.go | 17 +- 8 files changed, 541 insertions(+), 137 deletions(-) diff --git a/Makefile b/Makefile index b6cf3b6f7..ad6fbdc40 100644 --- a/Makefile +++ b/Makefile @@ -252,20 +252,14 @@ doc: crdoc ## Generate markdown documentation $(CRDOC) --resources config/crd/bases/flows.netobserv.io_flowcollectors.yaml --output docs/FlowCollector.md generate-go-conversions: $(CONVERSION_GEN) ## Run all generate-go-conversions - $(MAKE) clean-generated-conversions SRC_DIRS="./api/v1alpha1" + $(MAKE) clean-generated-conversions SRC_DIRS="./api/v1alpha1 ./api/v1beta1" $(CONVERSION_GEN) \ --input-dirs=./api/v1alpha1 \ - --build-tag=ignore_autogenerated_core \ - --output-file-base=zz_generated.conversion \ - $(CONVERSION_GEN_OUTPUT_BASE) \ - --go-header-file=./hack/boilerplate/boilerplate.generatego.txt - $(MAKE) clean-generated-conversions SRC_DIRS="./api/v1beta1" - $(CONVERSION_GEN) \ --input-dirs=./api/v1beta1 \ --build-tag=ignore_autogenerated_core \ --output-file-base=zz_generated.conversion \ $(CONVERSION_GEN_OUTPUT_BASE) \ - --go-header-file=./hack/boilerplate/boilerplate.generatego.txt + --go-header-file=./hack/boilerplate/boilerplate.generatego.txt generate: gencode manifests doc generate-go-conversions ## Run all code/file generators diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go index ffd08b199..2cab7eca7 100644 --- a/api/v1alpha1/zz_generated.conversion.go +++ b/api/v1alpha1/zz_generated.conversion.go @@ -228,11 +228,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != 
nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.ServerTLS)(nil), (*ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(a.(*v1beta2.ServerTLS), b.(*ServerTLS), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*FlowCollectorLoki)(nil), (*v1beta2.FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(a.(*FlowCollectorLoki), b.(*v1beta2.FlowCollectorLoki), scope) }); err != nil { @@ -268,6 +263,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.ServerTLS)(nil), (*ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(a.(*v1beta2.ServerTLS), b.(*ServerTLS), scope) + }); err != nil { + return err + } return nil } diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 1cce1fdc9..b48955b3c 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ package v1alpha1 import ( "k8s.io/api/autoscaling/v2" "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index d19bb3aaa..b26d0f0de 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -4953,12 +4953,12 @@ spec: type: object spec: description: 'Defines the desired state of the FlowCollector resource. -

*: the mention of "unsupported", or "deprecated" - for a feature throughout this document means that this feature is not - officially supported by Red Hat. It might have been, for instance, contributed - by the community and accepted without a formal agreement for maintenance. - The product maintainers might provide some support for these features - as a best effort only.' +

*: the mention of "unsupported", or "deprecated" for a feature + throughout this document means that this feature is not officially supported + by Red Hat. It might have been, for instance, contributed by the community + and accepted without a formal agreement for maintenance. The product + maintainers might provide some support for these features as a best + effort only.' properties: agent: description: Agent configuration for flows extraction. @@ -5006,20 +5006,6 @@ spec: they are only useful in edge debug or support scenarios.' type: object type: object - enableDNSTracking: - default: false - description: Enable the DNS tracking feature. This feature - requires mounting the kernel debug filesystem hence the - eBPF pod has to run as privileged. If the spec.agent.eBPF.privileged - parameter is not set, an error is reported. - type: boolean - enablePktDrop: - default: false - description: Enable the Packets drop flows logging feature. - This feature requires mounting the kernel debug filesystem, - so the eBPF pod has to run as privileged. If the spec.agent.eBPF.privileged - parameter is not set, an error is reported. - type: boolean excludeInterfaces: default: - lo @@ -5030,6 +5016,32 @@ spec: items: type: string type: array + features: + description: 'List of additional features to enable. They + are all disabled by default. Enabling additional features + may have performance impacts. Possible values are:
- + `PacketDrop`: enable the packets drop flows logging feature. + This feature requires mounting the kernel debug filesystem, + so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` + parameter is not set, an error is reported.
- `DNSTracking`: + enable the DNS tracking feature. This feature requires mounting + the kernel debug filesystem hence the eBPF pod has to run + as privileged. If the `spec.agent.eBPF.privileged` parameter + is not set, an error is reported.
- `FlowRTT` [unsupported + (*)]: enable flow latency (RTT) calculations in the eBPF + agent during TCP handshakes. This feature works better with + `sampling` set to 1.
' + items: + description: Agent feature, can be one of:
- `PacketDrop`, + to track packet drops.
- `DNSTracking`, to track specific + information on DNS traffic.
- `FlowRTT`, to track + TCP latency. [Unsupported (*)].
+ enum: + - PacketDrop + - DNSTracking + - FlowRTT + type: string + type: array imagePullPolicy: default: IfNotPresent description: '`imagePullPolicy` is the Kubernetes pull policy @@ -5088,6 +5100,28 @@ spec: description: '`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. \n This field + is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -5109,7 +5143,7 @@ spec: compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object sampling: @@ -5121,8 +5155,8 @@ spec: type: integer type: object ipfix: - description: '`ipfix` - deprecated (*) - describes the - settings related to the IPFIX-based flow reporter when `spec.agent.type` + description: '`ipfix` [deprecated (*)] - describes the settings + related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.' properties: cacheActiveTimeout: @@ -5196,12 +5230,11 @@ spec: default: EBPF description: '`type` selects the flows tracing agent. Possible values are:
- `EBPF` (default) to use NetObserv eBPF agent.
- - `IPFIX` - deprecated (*) - to use the legacy IPFIX - collector.
`EBPF` is recommended as it offers better performances - and should work regardless of the CNI installed on the cluster. - `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work - if they support exporting IPFIX, but they would require manual - configuration).' + - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
+ `EBPF` is recommended as it offers better performances and should + work regardless of the CNI installed on the cluster. `IPFIX` + works with OVN-Kubernetes CNI (other CNIs could work if they + support exporting IPFIX, but they would require manual configuration).' enum: - EBPF - IPFIX @@ -5416,15 +5449,16 @@ spec: of a object,such as kind,name apiVersion properties: apiVersion: - description: API version of the referent + description: apiVersion is the API version of + the referent type: string kind: - description: 'Kind of the referent; More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' + description: 'kind is the kind of the referent; + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: 'Name of the referent; More info: - http://kubernetes.io/docs/user-guide/identifiers#names' + description: 'name is the name of the referent; + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string required: - kind @@ -5863,6 +5897,28 @@ spec: description: '`resources`, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -5883,7 +5939,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -5908,7 +5965,7 @@ spec: properties: ipfix: description: IPFIX configuration, such as the IP address and - port to send enriched IPFIX flows to. Unsupported (*). + port to send enriched IPFIX flows to. [Unsupported (*)]. properties: targetHost: default: "" @@ -5937,34 +5994,61 @@ spec: description: Address of the Kafka server type: string sasl: - description: SASL authentication configuration. Unsupported - (*) + description: SASL authentication configuration. [Unsupported + (*)]. 
properties: - clientIDKey: - description: Key for client ID within the provided `reference` - type: string - clientSecretKey: - description: Key for client secret within the provided - `reference` - type: string - reference: + clientIDReference: description: Reference to the secret or config map containing - the client ID and secret + the client ID properties: + file: + description: File name within the config map or + secret + type: string name: - description: Name of the config map or secret to - reference + description: Name of the config map or secret containing + the file type: string namespace: default: "" - description: Namespace of the config map or secret. - If omitted, assumes same namespace as where NetObserv - is deployed. If the namespace is different, the - config map or the secret will be copied so that - it can be mounted as required. + description: Namespace of the config map or secret + containing the file. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. type: string type: - description: 'Type for the reference: "configmap" + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + clientSecretReference: + description: Reference to the secret or config map containing + the client secret + properties: + file: + description: File name within the config map or + secret + type: string + name: + description: Name of the config map or secret containing + the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing the file. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string + type: + description: 'Type for the file reference: "configmap" or "secret"' enum: - configmap @@ -6079,8 +6163,7 @@ spec: type: object type: description: '`type` selects the type of exporters. The available - options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported - (*).' + options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported (*).' enum: - KAFKA - IPFIX @@ -6099,32 +6182,57 @@ spec: description: Address of the Kafka server type: string sasl: - description: SASL authentication configuration. Unsupported - (*) + description: SASL authentication configuration. [Unsupported (*)]. properties: - clientIDKey: - description: Key for client ID within the provided `reference` - type: string - clientSecretKey: - description: Key for client secret within the provided `reference` - type: string - reference: + clientIDReference: + description: Reference to the secret or config map containing + the client ID + properties: + file: + description: File name within the config map or secret + type: string + name: + description: Name of the config map or secret containing + the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + the file. If omitted, assumes same namespace as where + NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. 
+ type: string + type: + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + clientSecretReference: description: Reference to the secret or config map containing - the client ID and secret + the client secret properties: + file: + description: File name within the config map or secret + type: string name: - description: Name of the config map or secret to reference + description: Name of the config map or secret containing + the file type: string namespace: default: "" - description: Namespace of the config map or secret. If - omitted, assumes same namespace as where NetObserv is - deployed. If the namespace is different, the config - map or the secret will be copied so that it can be mounted - as required. + description: Namespace of the config map or secret containing + the file. If omitted, assumes same namespace as where + NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. type: string type: - description: 'Type for the reference: "configmap" or "secret"' + description: 'Type for the file reference: "configmap" + or "secret"' enum: - configmap - secret @@ -6273,8 +6381,8 @@ spec: description: '`authToken` describes the way to get a token to authenticate to Loki.
- `DISABLED` will not send any token with the request.
- `FORWARD` will forward - the user token for authorization.
- `HOST` - deprecated - (*) - will use the local pod service account to authenticate + the user token for authorization.
- `HOST` [deprecated + (*)] - will use the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set to `FORWARD`.' enum: @@ -6529,6 +6637,12 @@ spec: and forwards them to the Loki persistence layer and/or any available exporter.' properties: + clusterName: + default: "" + description: '`clusterName` is the name of the cluster to appear + in the flows data. This is useful in a multi-cluster context. + When using OpenShift, leave empty to make it automatically determined.' + type: string conversationEndTimeout: default: 10s description: '`conversationEndTimeout` is the time to wait after @@ -6800,15 +6914,16 @@ spec: of a object,such as kind,name apiVersion properties: apiVersion: - description: API version of the referent + description: apiVersion is the API version of + the referent type: string kind: - description: 'Kind of the referent; More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' + description: 'kind is the kind of the referent; + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: 'Name of the referent; More info: - http://kubernetes.io/docs/user-guide/identifiers#names' + description: 'name is the name of the referent; + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string required: - kind @@ -7195,11 +7310,19 @@ spec: default: - egress - packets + - nodes-flows + - namespaces-flows + - workloads-flows + - namespaces description: '`ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, - `packets`, `namespaces`, `nodes`, `workloads`.' + `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, + `namespaces-flows`, `workloads-flows`. Namespace-based metrics + are covered by both `workloads` and `namespaces` tags, hence + it is recommended to always ignore one of them (`workloads` + offering a finer granularity).' items: type: string type: array @@ -7217,6 +7340,12 @@ spec: tls: description: TLS configuration. properties: + insecureSkipVerify: + default: false + description: insecureSkipVerify allows skipping client-side + verification of the provided certificate If set + to true, ProvidedCaFile field will be ignored + type: boolean provided: description: TLS configuration when `type` is set to `PROVIDED`. @@ -7253,6 +7382,34 @@ spec: - secret type: string type: object + providedCaFile: + description: Reference to the CA file will be ignored + properties: + file: + description: File name within the config map or + secret + type: string + name: + description: Name of the config map or secret + containing the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing the file. If omitted, assumes same + namespace as where NetObserv is deployed. If + the namespace is different, the config map or + the secret will be copied so that it can be + mounted as required. + type: string + type: + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object type: default: DISABLED description: Select the type of TLS configuration:
@@ -7294,6 +7451,28 @@ spec: description: '`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -7314,7 +7493,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -7329,13 +7509,14 @@ spec: description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. \t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index 862d764bf..efd774e4b 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -5087,6 +5087,28 @@ spec: description: '`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. \n This field + is immutable. It can only be set for containers." 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -5108,7 +5130,7 @@ spec: compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object sampling: @@ -5414,15 +5436,16 @@ spec: of a object,such as kind,name apiVersion properties: apiVersion: - description: API version of the referent + description: apiVersion is the API version of + the referent type: string kind: - description: 'Kind of the referent; More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' + description: 'kind is the kind of the referent; + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: 'Name of the referent; More info: - http://kubernetes.io/docs/user-guide/identifiers#names' + description: 'name is the name of the referent; + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string required: - kind @@ -5861,6 +5884,28 @@ spec: description: '`resources`, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -5881,7 +5926,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests cannot exceed + Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -6855,15 +6901,16 @@ spec: of a object,such as kind,name apiVersion properties: apiVersion: - description: API version of the referent + description: apiVersion is the API version of + the referent type: string kind: - description: 'Kind of the referent; More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' + description: 'kind is the kind of the referent; + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: 'Name of the referent; More info: - http://kubernetes.io/docs/user-guide/identifiers#names' + description: 'name is the name of the referent; + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string required: - kind @@ -7391,6 +7438,28 @@ spec: description: '`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -7411,7 +7480,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -7426,13 +7496,14 @@ spec: description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. 
\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition diff --git a/controllers/consoleplugin/consoleplugin_test.go b/controllers/consoleplugin/consoleplugin_test.go index 555682d3e..99e614add 100644 --- a/controllers/consoleplugin/consoleplugin_test.go +++ b/controllers/consoleplugin/consoleplugin_test.go @@ -232,6 +232,42 @@ func TestContainerUpdateCheck(t *testing.T) { assert.Contains(report.String(), "Volumes changed") } +func TestContainerUpdateWithLokistackMode(t *testing.T) { + assert := assert.New(t) + + //equals specs + plugin := getPluginConfig() + loki := flowslatest.FlowCollectorLoki{Mode: "LOKISTACK", LokiStack: &flowslatest.LokiStack{Name: "lokistack", Namespace: "ls-namespace"}} + spec := flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} + builder := newBuilder(testNamespace, testImage, &spec) + old := builder.deployment("digest") + nEw := builder.deployment("digest") + report := helper.NewChangeReport("") + assert.False(helper.PodChanged(&old.Spec.Template, &nEw.Spec.Template, constants.PluginName, &report)) + assert.Contains(report.String(), "no change") + + //update lokistack name + loki.LokiStack.Name = "lokistack-updated" + + spec = flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} + builder = newBuilder(testNamespace, testImage, &spec) + nEw = builder.deployment("digest") + report = helper.NewChangeReport("") + assert.True(helper.PodChanged(&old.Spec.Template, &nEw.Spec.Template, constants.PluginName, &report)) + assert.Contains(report.String(), "Volumes changed") + old = nEw + + //update lokistack namespace + loki.LokiStack.Namespace = "ls-namespace-updated" + + spec = flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} + builder = newBuilder(testNamespace, testImage, &spec) + nEw = builder.deployment("digest") + report = helper.NewChangeReport("") + assert.True(helper.PodChanged(&old.Spec.Template, &nEw.Spec.Template, constants.PluginName, &report)) + assert.Contains(report.String(), "Container changed") +} + func TestServiceUpdateCheck(t *testing.T) { assert := assert.New(t) old := getServiceSpecs() diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index a110c14d0..4a4951179 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -8935,6 +8935,15 @@ Agent configuration for flows extraction. + claims + []object + + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.
+ + false + limits map[string]int or string @@ -8945,13 +8954,40 @@ Agent configuration for flows extraction. requests map[string]int or string - Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
false +### FlowCollector.spec.agent.ebpf.resources.claims[index] +[↩ Parent](#flowcollectorspecagentebpfresources-1) + + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
<table>
    <thead>
        <tr>
            <th>Name</th>
            <th>Type</th>
            <th>Description</th>
            <th>Required</th>
        </tr>
    </thead>
    <tbody>
        <tr>
            <td><b>name</b></td>
            <td>string</td>
            <td>Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.<br/></td>
            <td>true</td>
        </tr>
    </tbody>
</table>
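The `claims` entries documented in this table map to the `Claims` field of `corev1.ResourceRequirements` in the upstream API. A minimal sketch of how the eBPF agent resources could be populated in Go, assuming a hypothetical claim name `flow-cache-claim` that would have to exist in `pod.spec.resourceClaims`, on a cluster with the DynamicResourceAllocation feature gate enabled:

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// Sketch only: compute resources for the eBPF agent container, including the
// new alpha `claims` list. "flow-cache-claim" is an invented name that must
// match an entry in pod.spec.resourceClaims of the agent pod.
func ebpfResources() corev1.ResourceRequirements {
	return corev1.ResourceRequirements{
		Limits: corev1.ResourceList{
			corev1.ResourceMemory: resource.MustParse("800Mi"),
		},
		Requests: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("100m"),
			corev1.ResourceMemory: resource.MustParse("50Mi"),
		},
		Claims: []corev1.ResourceClaim{{Name: "flow-cache-claim"}},
	}
}
```

The limit and request values above simply reuse the defaults from the sample CR in this series.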
+ + ### FlowCollector.spec.agent.ipfix [↩ Parent](#flowcollectorspecagent-1) @@ -9680,21 +9716,21 @@ describedObject specifies the descriptions of a object,such as kind,name apiVers kind string - Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
true name string - Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
+ name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
true apiVersion string - API version of the referent
+ apiVersion is the API version of the referent
false @@ -10231,6 +10267,15 @@ target specifies the target value for the given metric + claims + []object + + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.
+ + false + limits map[string]int or string @@ -10241,13 +10286,40 @@ target specifies the target value for the given metric requests map[string]int or string - Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
false +### FlowCollector.spec.consolePlugin.resources.claims[index] +[↩ Parent](#flowcollectorspecconsolepluginresources-1) + + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
<table>
    <thead>
        <tr>
            <th>Name</th>
            <th>Type</th>
            <th>Description</th>
            <th>Required</th>
        </tr>
    </thead>
    <tbody>
        <tr>
            <td><b>name</b></td>
            <td>string</td>
            <td>Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.<br/></td>
            <td>true</td>
        </tr>
    </tbody>
</table>
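The Kafka SASL rework shown earlier in the CRD diff replaces the former `reference` plus `clientIDKey`/`clientSecretKey` trio with two self-contained file references. A hedged Go sketch of how they could be filled in, assuming the struct field names mirror the JSON keys `clientIDReference` and `clientSecretReference`, and using an invented secret named `kafka-sasl`:

```go
package main

import (
	flowsv1beta2 "github.com/netobserv/network-observability-operator/api/v1beta2"
)

// Sketch only: each SASL credential now points to its own config map or
// secret, plus a file within it, instead of sharing a single reference.
func saslFileReferences() (clientID, clientSecret flowsv1beta2.FileReference) {
	clientID = flowsv1beta2.FileReference{
		Type:      "secret",     // "configmap" or "secret"
		Name:      "kafka-sasl", // invented secret name
		Namespace: "netobserv",  // omit to default to the NetObserv namespace
		File:      "client-id",
	}
	clientSecret = flowsv1beta2.FileReference{
		Type:      "secret",
		Name:      "kafka-sasl",
		Namespace: "netobserv",
		File:      "client-secret",
	}
	return clientID, clientSecret
}
```

One design consequence worth noting: because each credential is an independent `FileReference`, the client ID and the client secret can now live in different objects, or even different namespaces, which the old single-`reference` layout did not allow.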
+ + ### FlowCollector.spec.exporters[index] [↩ Parent](#flowcollectorspec-1) @@ -12338,21 +12410,21 @@ describedObject specifies the descriptions of a object,such as kind,name apiVers kind string - Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
true name string - Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
+ name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
true apiVersion string - API version of the referent
+ apiVersion is the API version of the referent
false @@ -13058,6 +13130,15 @@ Reference to the CA file will be ignored + claims + []object + + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.
+ + false + limits map[string]int or string @@ -13068,13 +13149,40 @@ Reference to the CA file will be ignored requests map[string]int or string - Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
false +### FlowCollector.spec.processor.resources.claims[index] +[↩ Parent](#flowcollectorspecprocessorresources-1) + + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
<table>
    <thead>
        <tr>
            <th>Name</th>
            <th>Type</th>
            <th>Description</th>
            <th>Required</th>
        </tr>
    </thead>
    <tbody>
        <tr>
            <td><b>name</b></td>
            <td>string</td>
            <td>Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.<br/></td>
            <td>true</td>
        </tr>
    </tbody>
</table>
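The `FlowCollector.status` section that follows documents the standard `metav1.Condition` contract. For context, a typical, illustrative way for a controller to record such a condition is through the apimachinery helper; this is a sketch, not the operator's actual code:

```go
package main

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Illustrative sketch: SetStatusCondition fills in lastTransitionTime and
// replaces any existing condition of the same type, which keeps the list
// map-like (one entry per type), as the schema requires.
func setReady(conditions *[]metav1.Condition, generation int64) {
	meta.SetStatusCondition(conditions, metav1.Condition{
		Type:               "Ready",
		Status:             metav1.ConditionTrue,
		Reason:             "Reconciled",
		Message:            "flow collection pipeline is deployed",
		ObservedGeneration: generation,
	})
}
```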
+ + ### FlowCollector.status [↩ Parent](#flowcollector-1) @@ -13114,8 +13222,9 @@ Reference to the CA file will be ignored -Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` - // other fields } +Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + // other fields } diff --git a/pkg/volumes/builder.go b/pkg/volumes/builder.go index 448368914..b56b6d389 100644 --- a/pkg/volumes/builder.go +++ b/pkg/volumes/builder.go @@ -41,14 +41,14 @@ func (b *Builder) AddCertificate(ref *flowslatest.CertificateReference, volumeNa certPath = fmt.Sprintf("/var/%s/%s", volumeName, ref.CertFile) keyPath = fmt.Sprintf("/var/%s/%s", volumeName, ref.CertKey) vol, vm := buildVolumeAndMount(ref.Type, ref.Name, volumeName) - b.info = append(b.info, VolumeInfo{Volume: vol, Mount: vm}) + b.insertOrReplace(&VolumeInfo{Volume: vol, Mount: vm}) } return } func (b *Builder) AddVolume(config *flowslatest.FileReference, volumeName string) string { vol, vm := buildVolumeAndMount(config.Type, config.Name, volumeName) - b.info = append(b.info, VolumeInfo{Volume: vol, Mount: vm}) + b.insertOrReplace(&VolumeInfo{Volume: vol, Mount: vm}) return path.Join("var", volumeName, config.File) } @@ -129,3 +129,16 @@ func buildVolumeAndMount(refType flowslatest.MountableType, refName string, volu MountPath: "/var/" + volumeName, } } + +func (b *Builder) insertOrReplace(vi *VolumeInfo) { + // find any existing volume info and replace it + for i := range b.info { + if b.info[i].Volume.Name == vi.Volume.Name || b.info[i].Mount.Name == vi.Mount.Name { + b.info[i] = *vi + return + } + } + + // else just append new volume info + b.info = append(b.info, *vi) +} From 95a83ed557a5e9e3604b1e47b4ae57b4f759e371 Mon Sep 17 00:00:00 2001 From: Julien Pinsonneau Date: Thu, 21 Sep 2023 11:27:29 +0200 Subject: [PATCH 06/17] preserve loki config in conversions --- api/v1beta1/flowcollector_webhook.go | 39 +++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/api/v1beta1/flowcollector_webhook.go b/api/v1beta1/flowcollector_webhook.go index 525ce3bfd..de4b16da1 100644 --- a/api/v1beta1/flowcollector_webhook.go +++ b/api/v1beta1/flowcollector_webhook.go @@ -21,6 +21,7 @@ import ( "github.com/netobserv/network-observability-operator/api/v1beta2" utilconversion "github.com/netobserv/network-observability-operator/pkg/conversion" + "github.com/netobserv/network-observability-operator/pkg/helper" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" apiconversion "k8s.io/apimachinery/pkg/conversion" 
"sigs.k8s.io/controller-runtime/pkg/conversion" @@ -57,7 +58,26 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Processor.Metrics.DisableAlerts = restored.Spec.Processor.Metrics.DisableAlerts } - dst.Spec.Loki.Manual = restored.Spec.Loki.Manual + // restore loki configuration from metadata + if len(dst.Spec.Loki.Mode) > 0 { + dst.Spec.Loki.Mode = restored.Spec.Loki.Mode + dst.Spec.Loki.Manual = restored.Spec.Loki.Manual + dst.Spec.Loki.LokiStack = restored.Spec.Loki.LokiStack + } else { + // fallback on previous Manual mode + dst.Spec.Loki.Mode = v1beta2.LokiModeManual + dst.Spec.Loki.Manual.IngesterURL = r.Spec.Loki.URL + dst.Spec.Loki.Manual.QuerierURL = r.Spec.Loki.QuerierURL + dst.Spec.Loki.Manual.StatusURL = r.Spec.Loki.StatusURL + dst.Spec.Loki.Manual.TenantID = r.Spec.Loki.TenantID + dst.Spec.Loki.Manual.AuthToken = r.Spec.Loki.AuthToken + if err := Convert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(&r.Spec.Loki.TLS, &dst.Spec.Loki.Manual.TLS, nil); err != nil { + return fmt.Errorf("copying v1beta1.Loki.TLS into v1beta2.Loki.Manual.TLS: %w", err) + } + if err := Convert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(&r.Spec.Loki.StatusTLS, &dst.Spec.Loki.Manual.StatusTLS, nil); err != nil { + return fmt.Errorf("copying v1beta1.Loki.StatusTLS into v1beta2.Loki.Manual.StatusTLS: %w", err) + } + } return nil } @@ -72,6 +92,23 @@ func (r *FlowCollector) ConvertFrom(srcRaw conversion.Hub) error { r.Status.Conditions = make([]v1.Condition, len(src.Status.Conditions)) copy(r.Status.Conditions, src.Status.Conditions) + r.Spec.Loki.URL = helper.LokiIngesterURL(&src.Spec.Loki) + r.Spec.Loki.QuerierURL = helper.LokiQuerierURL(&src.Spec.Loki) + r.Spec.Loki.StatusURL = helper.LokiStatusURL(&src.Spec.Loki) + r.Spec.Loki.TenantID = helper.LokiTenantID(&src.Spec.Loki) + switch src.Spec.Loki.Mode { + case v1beta2.LokiModeManual: + r.Spec.Loki.AuthToken = src.Spec.Loki.Manual.AuthToken + case v1beta2.LokiModeLokiStack: + r.Spec.Loki.AuthToken = v1beta2.LokiAuthForwardUserToken + } + if err := Convert_v1beta2_ClientTLS_To_v1beta1_ClientTLS(helper.LokiTLS(&src.Spec.Loki), &r.Spec.Loki.TLS, nil); err != nil { + return fmt.Errorf("copying v1beta2.LokiTLS into v1beta1.LokiTLS: %w", err) + } + if err := Convert_v1beta2_ClientTLS_To_v1beta1_ClientTLS(helper.LokiStatusTLS(&src.Spec.Loki), &r.Spec.Loki.StatusTLS, nil); err != nil { + return fmt.Errorf("copying v1beta2.LokiStatusTLS into v1beta1.LokiStatusTLS: %w", err) + } + // Preserve Hub data on down-conversion except for metadata return utilconversion.MarshalData(src, r) } From b2c94877837e35e6effd570e881ca035d9719aaa Mon Sep 17 00:00:00 2001 From: Julien Pinsonneau Date: Thu, 21 Sep 2023 16:59:46 +0200 Subject: [PATCH 07/17] set v1beta2 as current --- .mk/sample.mk | 4 +- Makefile | 3 +- PROJECT | 4 +- README.md | 6 +- .../flows.netobserv.io_flowcollectors.yaml | 1 + ...observ-operator.clusterserviceversion.yaml | 193 +++++++++++++++++- .../patches/webhook_in_flowcollectors.yaml | 1 + ...observ-operator.clusterserviceversion.yaml | 6 + config/samples/kustomization.yaml | 1 + config/webhook/manifests.yaml | 4 +- .../flowcollector_controller_minimal_test.go | 2 +- hack/asciidoc-gen-config.yaml | 2 +- hack/asciidoc-gen.sh | 26 +-- 13 files changed, 225 insertions(+), 28 deletions(-) diff --git a/.mk/sample.mk b/.mk/sample.mk index a4ce38c68..a8b31a6ee 100644 --- a/.mk/sample.mk +++ b/.mk/sample.mk @@ -3,9 +3,9 @@ deploy-sample-cr: @echo -e "\n==> Deploy sample CR" ifeq (main,$(VERSION)) - kubectl apply -f 
./config/samples/flows_v1beta1_flowcollector.yaml || true + kubectl apply -f ./config/samples/flows_v1beta2_flowcollector.yaml || true else - kubectl apply -f ./config/samples/flows_v1beta1_flowcollector_versioned.yaml || true + kubectl apply -f ./config/samples/flows_v1beta2_flowcollector_versioned.yaml || true endif # Undeploy the sample FlowCollector CR diff --git a/Makefile b/Makefile index ad6fbdc40..458c72ef6 100644 --- a/Makefile +++ b/Makefile @@ -252,10 +252,11 @@ doc: crdoc ## Generate markdown documentation $(CRDOC) --resources config/crd/bases/flows.netobserv.io_flowcollectors.yaml --output docs/FlowCollector.md generate-go-conversions: $(CONVERSION_GEN) ## Run all generate-go-conversions - $(MAKE) clean-generated-conversions SRC_DIRS="./api/v1alpha1 ./api/v1beta1" + $(MAKE) clean-generated-conversions SRC_DIRS="./api/v1alpha1 ./api/v1beta1 ./api/v1beta2" $(CONVERSION_GEN) \ --input-dirs=./api/v1alpha1 \ --input-dirs=./api/v1beta1 \ + --input-dirs=./api/v1beta2 \ --build-tag=ignore_autogenerated_core \ --output-file-base=zz_generated.conversion \ $(CONVERSION_GEN_OUTPUT_BASE) \ diff --git a/PROJECT b/PROJECT index 6663bfd1b..297840b63 100644 --- a/PROJECT +++ b/PROJECT @@ -12,8 +12,8 @@ resources: domain: netobserv.io group: flows kind: FlowCollector - path: github.com/netobserv/network-observability-operator/api/v1beta1 - version: v1beta1 + path: github.com/netobserv/network-observability-operator/api/v1beta2 + version: v1beta2 webhooks: conversion: true webhookVersion: v1 diff --git a/README.md b/README.md index d0e3fcc2a..3ff96dd49 100644 --- a/README.md +++ b/README.md @@ -44,7 +44,7 @@ To deploy the monitoring pipeline, this `make` target installs a `FlowCollector` make deploy-sample-cr ``` -Alternatively, you can [grab and edit](./config/samples/flows_v1beta1_flowcollector.yaml) this config before installing it. +Alternatively, you can [grab and edit](./config/samples/flows_v1beta2_flowcollector.yaml) this config before installing it. You can still edit the `FlowCollector` after it's installed: the operator will take care about reconciling everything with the updated configuration: @@ -61,7 +61,7 @@ To deploy a specific version of the operator, you need to switch to the related ```bash git checkout 0.1.2 VERSION=0.1.2 make deploy deploy-loki deploy-grafana -kubectl apply -f ./config/samples/flows_v1beta1_flowcollector_versioned.yaml +kubectl apply -f ./config/samples/flows_v1beta2_flowcollector_versioned.yaml ``` Beware that the version of the underlying components, such as flowlogs-pipeline, may be tied to the version of the operator (this is why we recommend switching the git branch). Breaking this correlation may result in crashes. The versions of the underlying components are defined in the `FlowCollector` resource as image tags. @@ -110,7 +110,7 @@ To get dashboards, import [this file](./config/samples/dashboards/Network%20Obse ## Configuration -The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](./docs/FlowCollector.md), and a full sample file [there](./config/samples/flows_v1beta1_flowcollector.yaml). +The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](./docs/FlowCollector.md), and a full sample file [there](./config/samples/flows_v1beta2_flowcollector.yaml). 
To edit configuration in cluster, run: diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index b26d0f0de..2022b6fb2 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -17,6 +17,7 @@ spec: conversionReviewVersions: - v1alpha1 - v1beta1 + - v1beta2 group: flows.netobserv.io names: kind: FlowCollector diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index 9130aa0e1..1fb7c0594 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -350,6 +350,189 @@ metadata: } } } + }, + { + "apiVersion": "flows.netobserv.io/v1beta2", + "kind": "FlowCollector", + "metadata": { + "name": "cluster" + }, + "spec": { + "agent": { + "ebpf": { + "cacheActiveTimeout": "5s", + "cacheMaxFlows": 100000, + "excludeInterfaces": [ + "lo" + ], + "imagePullPolicy": "IfNotPresent", + "interfaces": [], + "kafkaBatchSize": 10485760, + "logLevel": "info", + "resources": { + "limits": { + "memory": "800Mi" + }, + "requests": { + "cpu": "100m", + "memory": "50Mi" + } + }, + "sampling": 50 + }, + "type": "EBPF" + }, + "consolePlugin": { + "autoscaler": { + "maxReplicas": 3, + "metrics": [ + { + "resource": { + "name": "cpu", + "target": { + "averageUtilization": 50, + "type": "Utilization" + } + }, + "type": "Resource" + } + ], + "minReplicas": 1, + "status": "DISABLED" + }, + "imagePullPolicy": "IfNotPresent", + "logLevel": "info", + "port": 9001, + "portNaming": { + "enable": true, + "portNames": { + "3100": "loki" + } + }, + "quickFilters": [ + { + "default": true, + "filter": { + "dst_namespace!": "openshift-,netobserv", + "src_namespace!": "openshift-,netobserv" + }, + "name": "Applications" + }, + { + "filter": { + "dst_namespace": "openshift-,netobserv", + "src_namespace": "openshift-,netobserv" + }, + "name": "Infrastructure" + }, + { + "default": true, + "filter": { + "dst_kind": "Pod", + "src_kind": "Pod" + }, + "name": "Pods network" + }, + { + "filter": { + "dst_kind": "Service" + }, + "name": "Services network" + } + ], + "register": true + }, + "deploymentModel": "DIRECT", + "exporters": [], + "kafka": { + "address": "kafka-cluster-kafka-bootstrap.netobserv", + "tls": { + "caCert": { + "certFile": "ca.crt", + "name": "kafka-cluster-cluster-ca-cert", + "type": "secret" + }, + "enable": false, + "userCert": { + "certFile": "user.crt", + "certKey": "user.key", + "name": "flp-kafka", + "type": "secret" + } + }, + "topic": "network-flows" + }, + "loki": { + "batchSize": 10485760, + "batchWait": "1s", + "manual": { + "ingesterUrl": "http://loki.netobserv.svc:3100/", + "statusTls": { + "caCert": { + "certFile": "service-ca.crt", + "name": "loki-ca-bundle", + "type": "configmap" + }, + "enable": false, + "insecureSkipVerify": false, + "userCert": { + "certFile": "tls.crt", + "certKey": "tls.key", + "name": "loki-query-frontend-http", + "type": "secret" + } + }, + "tls": { + "caCert": { + "certFile": "service-ca.crt", + "name": "loki-gateway-ca-bundle", + "type": "configmap" + }, + "enable": false, + "insecureSkipVerify": false + } + }, + "maxBackoff": "5s", + "maxRetries": 2, + "minBackoff": "1s", + "mode": "MANUAL" + }, + "namespace": "netobserv", + "processor": { + "conversationEndTimeout": "10s", + "conversationHeartbeatInterval": "30s", + "conversationTerminatingTimeout": "5s", + 
"dropUnusedFields": true, + "imagePullPolicy": "IfNotPresent", + "kafkaConsumerAutoscaler": null, + "kafkaConsumerBatchSize": 10485760, + "kafkaConsumerQueueCapacity": 1000, + "kafkaConsumerReplicas": 3, + "logLevel": "info", + "logTypes": "FLOWS", + "metrics": { + "disableAlerts": [], + "ignoreTags": [ + "egress", + "packets" + ], + "server": { + "port": 9102 + } + }, + "port": 2055, + "profilePort": 6060, + "resources": { + "limits": { + "memory": "800Mi" + }, + "requests": { + "cpu": "100m", + "memory": "100Mi" + } + } + } + } } ] capabilities: Seamless Upgrades @@ -386,7 +569,10 @@ spec: kind: FlowCollector name: flowcollectors.flows.netobserv.io version: v1beta1 - - kind: FlowCollector + - description: '`FlowCollector` is the schema for the network flows collection + API, which pilots and configures the underlying deployments.' + displayName: Flow Collector + kind: FlowCollector name: flowcollectors.flows.netobserv.io version: v1beta2 description: |- @@ -862,6 +1048,7 @@ spec: - admissionReviewVersions: - v1alpha1 - v1beta1 + - v1beta2 containerPort: 443 conversionCRDs: - flowcollectors.flows.netobserv.io @@ -881,7 +1068,7 @@ spec: - apiGroups: - netobserv.io apiVersions: - - v1beta1 + - v1beta2 operations: - CREATE - UPDATE @@ -890,4 +1077,4 @@ spec: sideEffects: None targetPort: 9443 type: ValidatingAdmissionWebhook - webhookPath: /validate-netobserv-io-v1beta1-flowcollector + webhookPath: /validate-netobserv-io-v1beta2-flowcollector diff --git a/config/crd/patches/webhook_in_flowcollectors.yaml b/config/crd/patches/webhook_in_flowcollectors.yaml index 0d475d694..fa0d9bee0 100644 --- a/config/crd/patches/webhook_in_flowcollectors.yaml +++ b/config/crd/patches/webhook_in_flowcollectors.yaml @@ -15,3 +15,4 @@ spec: conversionReviewVersions: - v1alpha1 - v1beta1 + - v1beta2 diff --git a/config/csv/bases/netobserv-operator.clusterserviceversion.yaml b/config/csv/bases/netobserv-operator.clusterserviceversion.yaml index 66f52bc05..4616edb1f 100644 --- a/config/csv/bases/netobserv-operator.clusterserviceversion.yaml +++ b/config/csv/bases/netobserv-operator.clusterserviceversion.yaml @@ -35,6 +35,12 @@ spec: kind: FlowCollector name: flowcollectors.flows.netobserv.io version: v1beta1 + - description: '`FlowCollector` is the schema for the network flows collection + API, which pilots and configures the underlying deployments.' 
+ displayName: Flow Collector + kind: FlowCollector + name: flowcollectors.flows.netobserv.io + version: v1beta2 description: ':full-description:' displayName: NetObserv Operator icon: diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 6976580b0..600f2703d 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -2,4 +2,5 @@ resources: - flows_v1alpha1_flowcollector.yaml - flows_v1beta1_flowcollector.yaml +- flows_v1beta2_flowcollector.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 143c344a6..62763b647 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -12,14 +12,14 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-netobserv-io-v1beta1-flowcollector + path: /validate-netobserv-io-v1beta2-flowcollector failurePolicy: Fail name: flowcollectorconversionwebhook.netobserv.io rules: - apiGroups: - netobserv.io apiVersions: - - v1beta1 + - v1beta2 operations: - CREATE - UPDATE diff --git a/controllers/flowcollector_controller_minimal_test.go b/controllers/flowcollector_controller_minimal_test.go index 34429977f..e4df0708c 100644 --- a/controllers/flowcollector_controller_minimal_test.go +++ b/controllers/flowcollector_controller_minimal_test.go @@ -8,7 +8,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" ) diff --git a/hack/asciidoc-gen-config.yaml b/hack/asciidoc-gen-config.yaml index f41f4fc40..e15e1e5a9 100644 --- a/hack/asciidoc-gen-config.yaml +++ b/hack/asciidoc-gen-config.yaml @@ -6,4 +6,4 @@ apiMap: resources: - kind: FlowCollector group: flows.netobserv.io - version: v1beta1 + version: v1beta2 diff --git a/hack/asciidoc-gen.sh b/hack/asciidoc-gen.sh index bb0a426b5..bc0dd2fae 100755 --- a/hack/asciidoc-gen.sh +++ b/hack/asciidoc-gen.sh @@ -5,24 +5,24 @@ set -e mkdir -p _tmp oc get --raw /openapi/v2 | jq . > _tmp/openapi.json -jq '.definitions |= ({"io.netobserv.flows.v1beta1.FlowCollector"}) - | del(.definitions."io.netobserv.flows.v1beta1.FlowCollector".properties.status) - | del(.definitions."io.netobserv.flows.v1beta1.FlowCollector".properties.metadata."$ref") - | .definitions."io.netobserv.flows.v1beta1.FlowCollector".properties.metadata += {type:"object"} - | del(.definitions."io.netobserv.flows.v1beta1.FlowCollector".properties.spec.properties.agent.properties.ebpf.properties.resources.properties.claims) - | del(.definitions."io.netobserv.flows.v1beta1.FlowCollector".properties.spec.properties.processor.properties.resources.properties.claims) - | del(.definitions."io.netobserv.flows.v1beta1.FlowCollector".properties.spec.properties.consolePlugin.properties.resources.properties.claims) - | del(.definitions."io.netobserv.flows.v1beta1.FlowCollector".properties.spec.properties.consolePlugin.properties.autoscaler.properties) - | del(.definitions."io.netobserv.flows.v1beta1.FlowCollector".properties.spec.properties.processor.properties.kafkaConsumerAutoscaler.properties) - | .definitions."io.netobserv.flows.v1beta1.FlowCollector".properties.spec.properties.consolePlugin.properties.autoscaler.description |= . + " Refer to HorizontalPodAutoscaler documentation (autoscaling/v2)." 
- | .definitions."io.netobserv.flows.v1beta1.FlowCollector".properties.spec.properties.processor.properties.kafkaConsumerAutoscaler.description |= . + " Refer to HorizontalPodAutoscaler documentation (autoscaling/v2)."' \ +jq '.definitions |= ({"io.netobserv.flows.v1beta2.FlowCollector"}) + | del(.definitions."io.netobserv.flows.v1beta2.FlowCollector".properties.status) + | del(.definitions."io.netobserv.flows.v1beta2.FlowCollector".properties.metadata."$ref") + | .definitions."io.netobserv.flows.v1beta2.FlowCollector".properties.metadata += {type:"object"} + | del(.definitions."io.netobserv.flows.v1beta2.FlowCollector".properties.spec.properties.agent.properties.ebpf.properties.resources.properties.claims) + | del(.definitions."io.netobserv.flows.v1beta2.FlowCollector".properties.spec.properties.processor.properties.resources.properties.claims) + | del(.definitions."io.netobserv.flows.v1beta2.FlowCollector".properties.spec.properties.consolePlugin.properties.resources.properties.claims) + | del(.definitions."io.netobserv.flows.v1beta2.FlowCollector".properties.spec.properties.consolePlugin.properties.autoscaler.properties) + | del(.definitions."io.netobserv.flows.v1beta2.FlowCollector".properties.spec.properties.processor.properties.kafkaConsumerAutoscaler.properties) + | .definitions."io.netobserv.flows.v1beta2.FlowCollector".properties.spec.properties.consolePlugin.properties.autoscaler.description |= . + " Refer to HorizontalPodAutoscaler documentation (autoscaling/v2)." + | .definitions."io.netobserv.flows.v1beta2.FlowCollector".properties.spec.properties.processor.properties.kafkaConsumerAutoscaler.description |= . + " Refer to HorizontalPodAutoscaler documentation (autoscaling/v2)."' \ _tmp/openapi.json > _tmp/openapi-amended.json openshift-apidocs-gen build -c hack/asciidoc-gen-config.yaml _tmp/openapi-amended.json -ADOC=docs/flowcollector-flows-netobserv-io-v1beta1.adoc +ADOC=docs/flowcollector-flows-netobserv-io-v1beta2.adoc -mv _tmp/flows_netobserv_io/flowcollector-flows-netobserv-io-v1beta1.adoc $ADOC +mv _tmp/flows_netobserv_io/flowcollector-flows-netobserv-io-v1beta2.adoc $ADOC sed -i -r 's/^:_content-type: ASSEMBLY$/:_content-type: REFERENCE/' $ADOC sed -i -r 's/^\[id="flowcollector-flows-netobserv-io-v.+"\]$/[id="network-observability-flowcollector-api-specifications_{context}"]/' $ADOC From 8b7b856fdd7884ed25795ec810a6a276cd2a08a6 Mon Sep 17 00:00:00 2001 From: acmenezes Date: Thu, 21 Sep 2023 15:21:44 -0400 Subject: [PATCH 08/17] FIX comments on v1beta2 and regenerate Signed-off-by: acmenezes --- api/v1beta2/flowcollector_types.go | 51 ++-- .../flows.netobserv.io_flowcollectors.yaml | 218 +++++++++--------- docs/FlowCollector.md | 66 +++--- 3 files changed, 167 insertions(+), 168 deletions(-) diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go index 8ca9cc0bc..dbf42572a 100644 --- a/api/v1beta2/flowcollector_types.go +++ b/api/v1beta2/flowcollector_types.go @@ -47,7 +47,6 @@ type FlowCollectorSpec struct { // Important: Run "make generate" to regenerate code after modifying this file // Namespace where NetObserv pods are deployed. - // If empty, the namespace of the operator is going to be used. 
// +kubebuilder:default:=netobserv Namespace string `json:"namespace,omitempty"` @@ -117,12 +116,12 @@ type FlowCollectorIPFIX struct { //+kubebuilder:validation:Pattern:=^\d+(ns|ms|s|m)?$ //+kubebuilder:default:="20s" - // `cacheActiveTimeout` is the max period during which the reporter will aggregate flows before sending + // `cacheActiveTimeout` is the max period during which the reporter aggregates flows before sending. CacheActiveTimeout string `json:"cacheActiveTimeout,omitempty" mapstructure:"cacheActiveTimeout,omitempty"` //+kubebuilder:validation:Minimum=0 //+kubebuilder:default:=400 - // `cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows + // `cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows. CacheMaxFlows int32 `json:"cacheMaxFlows,omitempty" mapstructure:"cacheMaxFlows,omitempty"` //+kubebuilder:validation:Minimum=2 @@ -181,7 +180,7 @@ type FlowCollectorEBPF struct { //+optional Sampling *int32 `json:"sampling,omitempty"` - // `cacheActiveTimeout` is the max period during which the reporter will aggregate flows before sending. + // `cacheActiveTimeout` is the max period during which the reporter aggregates flows before sending. // Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, // however you can expect higher memory consumption and an increased latency in the flow collection. //+kubebuilder:validation:Pattern:=^\d+(ns|ms|s|m)?$ @@ -195,14 +194,14 @@ type FlowCollectorEBPF struct { //+kubebuilder:default:=100000 CacheMaxFlows int32 `json:"cacheMaxFlows,omitempty"` - // `interfaces` contains the interface names from where flows will be collected. If empty, the agent - // will fetch all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. + // `interfaces` contains the interface names from where flows are collected. If empty, the agent + // fetches all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. // An entry is enclosed by slashes, such as `/br-/`, is matched as a regular expression. // Otherwise it is matched as a case-sensitive string. //+optional Interfaces []string `json:"interfaces"` - // `excludeInterfaces` contains the interface names that will be excluded from flow tracing. + // `excludeInterfaces` contains the interface names that are excluded from flow tracing. // An entry is enclosed by slashes, such as `/br-/`, is matched as a regular expression. // Otherwise it is matched as a case-sensitive string. //+kubebuilder:default=lo; @@ -215,7 +214,7 @@ type FlowCollectorEBPF struct { LogLevel string `json:"logLevel,omitempty"` // Privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: - // in that case, the operator will set granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) + // in that case, the operator sets granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) // to the container, to enable its correct operation. // If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF // is in use, then you can turn on this mode for more global privileges. @@ -254,7 +253,7 @@ type FlowCollectorKafka struct { Address string `json:"address"` //+kubebuilder:default:="" - // Kafka topic to use. It must exist, NetObserv will not create it. + // Kafka topic to use. It must exist, NetObserv does not create it. 
Topic string `json:"topic"` // TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. @@ -307,11 +306,11 @@ type ServerTLS struct { Provided *CertificateReference `json:"provided"` //+kubebuilder:default:=false - // insecureSkipVerify allows skipping client-side verification of the provided certificate - // If set to true, ProvidedCaFile field will be ignored + // insecureSkipVerify allows skipping client-side verification of the provided certificate. + // If set to true, the `providedCaFile` field is ignored. InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` - // Reference to the CA file will be ignored + // Reference to the CA file when `type` is set to `PROVIDED`. // +optional ProvidedCaFile *FileReference `json:"providedCaFile,omitempty"` } @@ -439,7 +438,7 @@ type FlowCollectorFLP struct { //+kubebuilder:default:=10485760 // +optional - // `kafkaConsumerBatchSize` indicates to the broker the maximum batch size, in bytes, that the consumer will accept. Ignored when not using Kafka. Default: 10MB. + // `kafkaConsumerBatchSize` indicates to the broker the maximum batch size, in bytes, that the consumer accepts. Ignored when not using Kafka. Default: 10MB. KafkaConsumerBatchSize int `json:"kafkaConsumerBatchSize"` // `logTypes` defines the desired record types to generate. Possible values are:
@@ -489,8 +488,8 @@ type FlowCollectorHPA struct { // +kubebuilder:validation:Enum:=DISABLED;ENABLED // +kubebuilder:default:=DISABLED // `status` describes the desired status regarding deploying an horizontal pod autoscaler.
- // - `DISABLED` will not deploy an horizontal pod autoscaler.
- // - `ENABLED` will deploy an horizontal pod autoscaler.
+ // - `DISABLED` does not deploy an horizontal pod autoscaler.
+ // - `ENABLED` deploys an horizontal pod autoscaler.
Status string `json:"status,omitempty"` // `minReplicas` is the lower limit for the number of replicas to which the autoscaler @@ -527,18 +526,18 @@ type LokiManualParams struct { //+kubebuilder:validation:optional // `querierURL` specifies the address of the Loki querier service, in case it is different from the - // Loki ingester URL. If empty, the URL value will be used (assuming that the Loki ingester + // Loki ingester URL. If empty, the URL value is used (assuming that the Loki ingester // and querier are in the same server). When using the Loki Operator, do not set it, since // ingestion and queries use the Loki gateway. QuerierURL string `json:"querierUrl,omitempty"` //+kubebuilder:validation:optional // `statusURL` specifies the address of the Loki `/ready`, `/metrics` and `/config` endpoints, in case it is different from the - // Loki querier URL. If empty, the `querierURL` value will be used. + // Loki querier URL. If empty, the `querierURL` value is used. // This is useful to show error messages and some context in the frontend. // When using the Loki Operator, set it to the Loki HTTP query frontend service, for example // https://loki-query-frontend-http.netobserv.svc:3100/. - // `statusTLS` configuration will be used when `statusUrl` is set. + // `statusTLS` configuration is used when `statusUrl` is set. StatusURL string `json:"statusUrl,omitempty"` //+kubebuilder:default:="netobserv" @@ -549,9 +548,9 @@ type LokiManualParams struct { // +kubebuilder:validation:Enum:="DISABLED";"HOST";"FORWARD" //+kubebuilder:default:="DISABLED" // `authToken` describes the way to get a token to authenticate to Loki.
- // - `DISABLED` will not send any token with the request.
- // - `FORWARD` will forward the user token for authorization.
- // - `HOST` [deprecated (*)] - will use the local pod service account to authenticate to Loki.
+ // - `DISABLED` does not send any token with the request.
+ // - `FORWARD` forwards the user token for authorization.
+ // - `HOST` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
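	// Editor's illustration (assumed request-level effect of each mode, for clarity;
	// not code from this patch):
	//
	//	DISABLED -> no Authorization header is sent to Loki
	//	FORWARD  -> Authorization: Bearer <logged-in user's token>
	//	HOST     -> Authorization: Bearer <pod service account token> (deprecated)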
// When using the Loki Operator, this must be set to `FORWARD`. AuthToken string `json:"authToken,omitempty"` @@ -700,7 +699,7 @@ type ConsolePluginPortConfig struct { // `QuickFilter` defines preset configuration for Console's quick filters type QuickFilter struct { - // Name of the filter, that will be displayed in Console + // Name of the filter, that is displayed in the Console // +kubebuilder:MinLength:=1 Name string `json:"name"` // `filter` is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a coma-separated string, @@ -753,8 +752,8 @@ type FileReference struct { // Name of the config map or secret containing the file Name string `json:"name,omitempty"` - // Namespace of the config map or secret containing the file. If omitted, assumes same namespace as where NetObserv is deployed. - // If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. + // Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. + // If the namespace is different, the config map or the secret is copied so that it can be mounted as required. // +optional //+kubebuilder:default:="" Namespace string `json:"namespace,omitempty"` @@ -771,8 +770,8 @@ type CertificateReference struct { // Name of the config map or secret containing certificates Name string `json:"name,omitempty"` - // Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. - // If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. + // Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. + // If the namespace is different, the config map or the secret is copied so that it can be mounted as required. // +optional //+kubebuilder:default:="" Namespace string `json:"namespace,omitempty"` diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index efd774e4b..0ed293abc 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -4957,11 +4957,11 @@ spec: cacheActiveTimeout: default: 5s description: '`cacheActiveTimeout` is the max period during - which the reporter will aggregate flows before sending. - Increasing `cacheMaxFlows` and `cacheActiveTimeout` can - decrease the network traffic overhead and the CPU load, - however you can expect higher memory consumption and an - increased latency in the flow collection.' + which the reporter aggregates flows before sending. Increasing + `cacheMaxFlows` and `cacheActiveTimeout` can decrease the + network traffic overhead and the CPU load, however you can + expect higher memory consumption and an increased latency + in the flow collection.' pattern: ^\d+(ns|ms|s|m)?$ type: string cacheMaxFlows: @@ -4997,7 +4997,7 @@ spec: default: - lo description: '`excludeInterfaces` contains the interface names - that will be excluded from flow tracing. An entry is enclosed + that are excluded from flow tracing. An entry is enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.' 
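                      # Editor's illustration (not part of the generated schema) of the matching
                      # rule described above, mixing a literal name and a slash-enclosed regex:
                      #   excludeInterfaces: ["lo", "/br-/"]  # "lo" matches exactly; /br-/ is a regular expression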
items: @@ -5040,10 +5040,10 @@ spec: type: string interfaces: description: '`interfaces` contains the interface names from - where flows will be collected. If empty, the agent will - fetch all the interfaces in the system, excepting the ones - listed in ExcludeInterfaces. An entry is enclosed by slashes, - such as `/br-/`, is matched as a regular expression. Otherwise + where flows are collected. If empty, the agent fetches all + the interfaces in the system, excepting the ones listed + in ExcludeInterfaces. An entry is enclosed by slashes, such + as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.' items: type: string @@ -5070,12 +5070,12 @@ spec: privileged: description: 'Privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: - in that case, the operator will set granular capabilities - (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, - to enable its correct operation. If for some reason these - capabilities cannot be set, such as if an old kernel version - not knowing CAP_BPF is in use, then you can turn on this - mode for more global privileges.' + in that case, the operator sets granular capabilities (BPF, + PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable + its correct operation. If for some reason these capabilities + cannot be set, such as if an old kernel version not knowing + CAP_BPF is in use, then you can turn on this mode for more + global privileges.' type: boolean resources: default: @@ -5149,13 +5149,13 @@ spec: cacheActiveTimeout: default: 20s description: '`cacheActiveTimeout` is the max period during - which the reporter will aggregate flows before sending' + which the reporter aggregates flows before sending.' pattern: ^\d+(ns|ms|s|m)?$ type: string cacheMaxFlows: default: 400 description: '`cacheMaxFlows` is the max number of flows in - an aggregate; when reached, the reporter sends the flows' + an aggregate; when reached, the reporter sends the flows.' format: int32 minimum: 0 type: integer @@ -5753,8 +5753,8 @@ spec: default: DISABLED description: '`status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `DISABLED` - will not deploy an horizontal pod autoscaler.
- `ENABLED` - will deploy an horizontal pod autoscaler.
' + does not deploy an horizontal pod autoscaler.
- `ENABLED` + deploys an horizontal pod autoscaler.
' enum: - DISABLED - ENABLED @@ -5850,8 +5850,8 @@ spec: example, `filter: {"src_namespace": "namespace1,namespace2"}`.' type: object name: - description: Name of the filter, that will be displayed - in Console + description: Name of the filter, that is displayed in the + Console type: string required: - filter @@ -5999,11 +5999,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing the file. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. + containing the file. If omitted, the default is + to use the same namespace as where NetObserv is + deployed. If the namespace is different, the config + map or the secret is copied so that it can be + mounted as required. type: string type: description: 'Type for the file reference: "configmap" @@ -6028,11 +6028,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing the file. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. + containing the file. If omitted, the default is + to use the same namespace as where NetObserv is + deployed. If the namespace is different, the config + map or the secret is copied so that it can be + mounted as required. type: string type: description: 'Type for the file reference: "configmap" @@ -6078,11 +6078,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing certificates. If omitted, assumes the - same namespace as where NetObserv is deployed. - If the namespace is different, the config map - or the secret will be copied so that it can be - mounted as required. + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it + can be mounted as required. type: string type: description: 'Type for the certificate reference: @@ -6124,11 +6124,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing certificates. If omitted, assumes the - same namespace as where NetObserv is deployed. - If the namespace is different, the config map - or the secret will be copied so that it can be - mounted as required. + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it + can be mounted as required. type: string type: description: 'Type for the certificate reference: @@ -6142,7 +6142,7 @@ spec: topic: default: "" description: Kafka topic to use. It must exist, NetObserv - will not create it. + does not create it. type: string required: - address @@ -6185,10 +6185,10 @@ spec: namespace: default: "" description: Namespace of the config map or secret containing - the file. If omitted, assumes same namespace as where - NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. + the file. If omitted, the default is to use the same + namespace as where NetObserv is deployed. If the namespace + is different, the config map or the secret is copied + so that it can be mounted as required. 
type: string type: description: 'Type for the file reference: "configmap" @@ -6212,10 +6212,10 @@ spec: namespace: default: "" description: Namespace of the config map or secret containing - the file. If omitted, assumes same namespace as where - NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. + the file. If omitted, the default is to use the same + namespace as where NetObserv is deployed. If the namespace + is different, the config map or the secret is copied + so that it can be mounted as required. type: string type: description: 'Type for the file reference: "configmap" @@ -6260,10 +6260,10 @@ spec: namespace: default: "" description: Namespace of the config map or secret containing - certificates. If omitted, assumes the same namespace - as where NetObserv is deployed. If the namespace is - different, the config map or the secret will be copied - so that it can be mounted as required. + certificates. If omitted, the default is to use the + same namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + is copied so that it can be mounted as required. type: string type: description: 'Type for the certificate reference: `configmap` @@ -6304,10 +6304,10 @@ spec: namespace: default: "" description: Namespace of the config map or secret containing - certificates. If omitted, assumes the same namespace - as where NetObserv is deployed. If the namespace is - different, the config map or the secret will be copied - so that it can be mounted as required. + certificates. If omitted, the default is to use the + same namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + is copied so that it can be mounted as required. type: string type: description: 'Type for the certificate reference: `configmap` @@ -6320,7 +6320,7 @@ spec: type: object topic: default: "" - description: Kafka topic to use. It must exist, NetObserv will + description: Kafka topic to use. It must exist, NetObserv does not create it. type: string required: @@ -6366,12 +6366,12 @@ spec: authToken: default: DISABLED description: '`authToken` describes the way to get a token - to authenticate to Loki.
- `DISABLED` will not send - any token with the request.
- `FORWARD` will forward - the user token for authorization.
- `HOST` [deprecated - (*)] - will use the local pod service account to authenticate - to Loki.
When using the Loki Operator, this must be - set to `FORWARD`.' + to authenticate to Loki.
- `DISABLED` does not send + any token with the request.
- `FORWARD` forwards the + user token for authorization.
- `HOST` [deprecated (*)] + - uses the local pod service account to authenticate to + Loki.
When using the Loki Operator, this must be set + to `FORWARD`.' enum: - DISABLED - HOST @@ -6387,8 +6387,8 @@ spec: querierUrl: description: '`querierURL` specifies the address of the Loki querier service, in case it is different from the Loki ingester - URL. If empty, the URL value will be used (assuming that - the Loki ingester and querier are in the same server). When + URL. If empty, the URL value is used (assuming that the + Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion and queries use the Loki gateway.' type: string @@ -6415,11 +6415,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing certificates. If omitted, assumes the - same namespace as where NetObserv is deployed. If - the namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it can + be mounted as required. type: string type: description: 'Type for the certificate reference: @@ -6460,11 +6460,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing certificates. If omitted, assumes the - same namespace as where NetObserv is deployed. If - the namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it can + be mounted as required. type: string type: description: 'Type for the certificate reference: @@ -6479,11 +6479,11 @@ spec: description: '`statusURL` specifies the address of the Loki `/ready`, `/metrics` and `/config` endpoints, in case it is different from the Loki querier URL. If empty, the `querierURL` - value will be used. This is useful to show error messages - and some context in the frontend. When using the Loki Operator, + value is used. This is useful to show error messages and + some context in the frontend. When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. `statusTLS` - configuration will be used when `statusUrl` is set.' + configuration is used when `statusUrl` is set.' type: string tenantID: default: netobserv @@ -6515,11 +6515,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing certificates. If omitted, assumes the - same namespace as where NetObserv is deployed. If - the namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it can + be mounted as required. type: string type: description: 'Type for the certificate reference: @@ -6560,11 +6560,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing certificates. If omitted, assumes the - same namespace as where NetObserv is deployed. If - the namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. + containing certificates. 
If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it can + be mounted as required. type: string type: description: 'Type for the certificate reference: @@ -6615,8 +6615,7 @@ spec: type: object namespace: default: netobserv - description: Namespace where NetObserv pods are deployed. If empty, - the namespace of the operator is going to be used. + description: Namespace where NetObserv pods are deployed. type: string processor: description: '`processor` defines the settings of the component that @@ -7218,8 +7217,8 @@ spec: default: DISABLED description: '`status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `DISABLED` - will not deploy an horizontal pod autoscaler.
- `ENABLED` - will deploy an horizontal pod autoscaler.
' + does not deploy an horizontal pod autoscaler.
- `ENABLED` + deploys an horizontal pod autoscaler.
' enum: - DISABLED - ENABLED @@ -7228,7 +7227,7 @@ spec: kafkaConsumerBatchSize: default: 10485760 description: '`kafkaConsumerBatchSize` indicates to the broker - the maximum batch size, in bytes, that the consumer will accept. + the maximum batch size, in bytes, that the consumer accepts. Ignored when not using Kafka. Default: 10MB.' type: integer kafkaConsumerQueueCapacity: @@ -7330,8 +7329,8 @@ spec: insecureSkipVerify: default: false description: insecureSkipVerify allows skipping client-side - verification of the provided certificate If set - to true, ProvidedCaFile field will be ignored + verification of the provided certificate. If set + to true, the `providedCaFile` field is ignored. type: boolean provided: description: TLS configuration when `type` is set @@ -7355,11 +7354,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing certificates. If omitted, assumes - the same namespace as where NetObserv is deployed. - If the namespace is different, the config map - or the secret will be copied so that it can - be mounted as required. + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, + the config map or the secret is copied so that + it can be mounted as required. type: string type: description: 'Type for the certificate reference: @@ -7370,7 +7369,8 @@ spec: type: string type: object providedCaFile: - description: Reference to the CA file will be ignored + description: Reference to the CA file when `type` + is set to `PROVIDED`. properties: file: description: File name within the config map or @@ -7383,11 +7383,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing the file. If omitted, assumes same - namespace as where NetObserv is deployed. If - the namespace is different, the config map or - the secret will be copied so that it can be - mounted as required. + containing the file. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, + the config map or the secret is copied so that + it can be mounted as required. type: string type: description: 'Type for the file reference: "configmap" diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index 4a4951179..2ff0298fc 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -8705,7 +8705,7 @@ Defines the desired state of the FlowCollector resource.

*: the mention
@@ -8785,7 +8785,7 @@ Agent configuration for flows extraction. @@ -8812,7 +8812,7 @@ Agent configuration for flows extraction. @@ -8838,7 +8838,7 @@ Agent configuration for flows extraction. @@ -8864,7 +8864,7 @@ Agent configuration for flows extraction. @@ -9008,7 +9008,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -9017,7 +9017,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -10437,7 +10437,7 @@ Kafka configuration, such as the address and topic, to send enriched flows to. @@ -10538,7 +10538,7 @@ Reference to the secret or config map containing the client ID @@ -10590,7 +10590,7 @@ Reference to the secret or config map containing the client secret @@ -10701,7 +10701,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka @@ -10760,7 +10760,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka @@ -10807,7 +10807,7 @@ Kafka configuration, allowing to use Kafka as a broker as part of the flow colle @@ -10908,7 +10908,7 @@ Reference to the secret or config map containing the client ID @@ -10960,7 +10960,7 @@ Reference to the secret or config map containing the client secret @@ -11071,7 +11071,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka @@ -11130,7 +11130,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka @@ -11326,7 +11326,7 @@ Loki configuration for MANUAL mode. This is the more flexible configuration. It @@ -11359,7 +11359,7 @@ Loki configuration for MANUAL mode. This is the more flexible configuration. It @@ -11475,7 +11475,7 @@ TLS client configuration for Loki status URL. @@ -11534,7 +11534,7 @@ TLS client configuration for Loki status URL. @@ -11645,7 +11645,7 @@ TLS client configuration for Loki URL. @@ -11704,7 +11704,7 @@ TLS client configuration for Loki URL. @@ -11832,7 +11832,7 @@ TLS client configuration for Loki URL. @@ -11993,7 +11993,7 @@ TLS client configuration for Loki URL. @@ -12985,7 +12985,7 @@ TLS configuration. @@ -13043,7 +13043,7 @@ TLS configuration when `type` is set to `PROVIDED`. @@ -13066,7 +13066,7 @@ TLS configuration when `type` is set to `PROVIDED`. -Reference to the CA file will be ignored +Reference to the CA file when `type` is set to `PROVIDED`.
namespace string - Namespace where NetObserv pods are deployed. If empty, the namespace of the operator is going to be used.
+ Namespace where NetObserv pods are deployed.

Default: netobserv
cacheActiveTimeout string - `cacheActiveTimeout` is the max period during which the reporter will aggregate flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.
+ `cacheActiveTimeout` is the max period during which the reporter aggregates flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.

Default: 5s
excludeInterfaces []string - `excludeInterfaces` contains the interface names that will be excluded from flow tracing. An entry is enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.
+ `excludeInterfaces` contains the interface names that are excluded from flow tracing. An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.

Default: [lo]
interfaces []string - `interfaces` contains the interface names from where flows will be collected. If empty, the agent will fetch all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. An entry is enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.
+ `interfaces` contains the interface names from where flows are collected. If empty, the agent fetches all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.
false
privileged boolean - Privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: in that case, the operator will set granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable its correct operation. If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF is in use, then you can turn on this mode for more global privileges.
+ Privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: in that case, the operator sets granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable its correct operation. If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF is in use, then you can turn on this mode for more global privileges.
false
cacheActiveTimeout string - `cacheActiveTimeout` is the max period during which the reporter will aggregate flows before sending
+ `cacheActiveTimeout` is the max period during which the reporter aggregates flows before sending.

Default: 20s
cacheMaxFlows integer - `cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows
+ `cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows.

Format: int32
Default: 400
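(Worked example, for clarity: with the defaults above, the IPFIX reporter flushes an aggregate as soon as it holds 400 flows or once the 20s `cacheActiveTimeout` has elapsed, whichever comes first.)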
@@ -9299,7 +9299,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims.
status enum - `status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `DISABLED` will not deploy an horizontal pod autoscaler.
- `ENABLED` will deploy an horizontal pod autoscaler.

+ `status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `DISABLED` does not deploy an horizontal pod autoscaler.
- `ENABLED` deploys an horizontal pod autoscaler.


Enum: DISABLED, ENABLED
Default: DISABLED
@@ -10236,7 +10236,7 @@ target specifies the target value for the given metric
name string - Name of the filter, that will be displayed in Console
+ Name of the filter, as displayed in the Console
true
topic string - Kafka topic to use. It must exist, NetObserv will not create it.
+ Kafka topic to use. It must exist, NetObserv does not create it.

Default:
namespace string - Namespace of the config map or secret containing the file. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.

Default:
namespace string - Namespace of the config map or secret containing the file. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.

Default:
namespace string - Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.

Default:
namespace string - Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.

Default:
topic string - Kafka topic to use. It must exist, NetObserv will not create it.
+ Kafka topic to use. It must exist, NetObserv does not create it.

Default:
namespace string - Namespace of the config map or secret containing the file. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.

Default:
namespace string - Namespace of the config map or secret containing the file. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.

Default:
namespace string - Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.

Default:
namespace string - Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.

Default:
authToken enum - `authToken` describes the way to get a token to authenticate to Loki.
- `DISABLED` will not send any token with the request.
- `FORWARD` will forward the user token for authorization.
- `HOST` [deprecated (*)] - will use the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set to `FORWARD`.
+ `authToken` describes the way to get a token to authenticate to Loki.
- `DISABLED` does not send any token with the request.
- `FORWARD` forwards the user token for authorization.
- `HOST` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set to `FORWARD`.

Enum: DISABLED, HOST, FORWARD
Default: DISABLED
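As a hedged, illustrative sketch of the `MANUAL` Loki settings this field belongs to — the field names come from the `LokiManualParams` type in this series, while the URL and values are assumptions, not recommendations:

	manual := LokiManualParams{
		IngesterURL: "http://loki:3100/", // assumed in-cluster Loki address
		TenantID:    "netobserv",         // the documented default
		AuthToken:   "FORWARD",           // the required value when using the Loki Operator
	}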
@@ -11345,7 +11345,7 @@ Loki configuration for MANUAL mode. This is the more flexible configuration. It
querierUrl string - `querierURL` specifies the address of the Loki querier service, in case it is different from the Loki ingester URL. If empty, the URL value will be used (assuming that the Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion and queries use the Loki gateway.
+ `querierURL` specifies the address of the Loki querier service, in case it is different from the Loki ingester URL. If empty, the URL value is used (assuming that the Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion and queries use the Loki gateway.
false
statusUrl string - `statusURL` specifies the address of the Loki `/ready`, `/metrics` and `/config` endpoints, in case it is different from the Loki querier URL. If empty, the `querierURL` value will be used. This is useful to show error messages and some context in the frontend. When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. `statusTLS` configuration will be used when `statusUrl` is set.
+ `statusURL` specifies the address of the Loki `/ready`, `/metrics` and `/config` endpoints, in case it is different from the Loki querier URL. If empty, the `querierURL` value is used. This is useful to show error messages and some context in the frontend. When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. `statusTLS` configuration is used when `statusUrl` is set.
false
namespace string - Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.

Default:
namespace string - Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.

Default:
namespace string - Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.

Default:
namespace string - Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.

Default:
kafkaConsumerBatchSize integer - `kafkaConsumerBatchSize` indicates to the broker the maximum batch size, in bytes, that the consumer will accept. Ignored when not using Kafka. Default: 10MB.
+ `kafkaConsumerBatchSize` indicates to the broker the maximum batch size, in bytes, that the consumer accepts. Ignored when not using Kafka. Default: 10MB.

Default: 10485760
status enum - `status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `DISABLED` will not deploy an horizontal pod autoscaler.
- `ENABLED` will deploy an horizontal pod autoscaler.

+ `status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `DISABLED` does not deploy an horizontal pod autoscaler.
- `ENABLED` deploys an horizontal pod autoscaler.


Enum: DISABLED, ENABLED
Default: DISABLED
@@ -12969,7 +12969,7 @@ TLS configuration.
insecureSkipVerify boolean - insecureSkipVerify allows skipping client-side verification of the provided certificate If set to true, ProvidedCaFile field will be ignored
+ insecureSkipVerify allows skipping client-side verification of the provided certificate. If set to true, the `providedCaFile` field is ignored.

Default: false
providedCaFile object - Reference to the CA file will be ignored
+ Reference to the CA file when `type` is set to `PROVIDED`.
false
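A hedged sketch tying `insecureSkipVerify` and `providedCaFile` together — the field names follow the `ServerTLS` and `FileReference` types in this series, the `PROVIDED` value is implied by the descriptions above, and the object names are hypothetical:

	tls := ServerTLS{
		Type:               "PROVIDED",                                         // assumed enum value
		InsecureSkipVerify: false,                                              // keep client-side verification
		ProvidedCaFile:     &FileReference{Name: "metrics-ca", File: "ca.crt"}, // hypothetical config map
	}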
namespace string - Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.

Default:
@@ -13095,7 +13095,7 @@ Reference to the CA file will be ignored From 6eb8ce4f3f2c2a6a5fbe28b77a770adcc6ce5a9a Mon Sep 17 00:00:00 2001 From: acmenezes Date: Thu, 21 Sep 2023 16:32:35 -0400 Subject: [PATCH 09/17] ADD conversion on v1alpha1 to v1beta2 for loki modes Signed-off-by: acmenezes --- api/v1alpha1/flowcollector_webhook.go | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/api/v1alpha1/flowcollector_webhook.go b/api/v1alpha1/flowcollector_webhook.go index 89d7173f9..36564c0fe 100644 --- a/api/v1alpha1/flowcollector_webhook.go +++ b/api/v1alpha1/flowcollector_webhook.go @@ -70,8 +70,24 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { } dst.Spec.Kafka.SASL = restored.Spec.Kafka.SASL - dst.Spec.Loki.Manual = restored.Spec.Loki.Manual + // restore loki configuration from metadata + if len(dst.Spec.Loki.Mode) > 0 { + dst.Spec.Loki.Mode = restored.Spec.Loki.Mode + dst.Spec.Loki.Manual = restored.Spec.Loki.Manual + dst.Spec.Loki.LokiStack = restored.Spec.Loki.LokiStack + } else { + // fallback on previous Manual mode + dst.Spec.Loki.Mode = v1beta2.LokiModeManual + dst.Spec.Loki.Manual.IngesterURL = r.Spec.Loki.URL + dst.Spec.Loki.Manual.QuerierURL = r.Spec.Loki.QuerierURL + dst.Spec.Loki.Manual.StatusURL = r.Spec.Loki.StatusURL + dst.Spec.Loki.Manual.TenantID = r.Spec.Loki.TenantID + dst.Spec.Loki.Manual.AuthToken = r.Spec.Loki.AuthToken + if err := Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(&r.Spec.Loki.TLS, &dst.Spec.Loki.Manual.TLS, nil); err != nil { + return fmt.Errorf("copying v1alplha1.Loki.TLS into v1beta2.Loki.Manual.TLS: %w", err) + } + } dst.Spec.Processor.Metrics.Server.TLS.InsecureSkipVerify = restored.Spec.Processor.Metrics.Server.TLS.InsecureSkipVerify dst.Spec.Processor.Metrics.Server.TLS.ProvidedCaFile = restored.Spec.Processor.Metrics.Server.TLS.ProvidedCaFile From b9f2a0977c74f166ec16bc203a3ba3cda1e00468 Mon Sep 17 00:00:00 2001 From: Julien Pinsonneau Date: Mon, 2 Oct 2023 11:11:26 +0200 Subject: [PATCH 10/17] update bundle and fix test --- .../flows.netobserv.io_flowcollectors.yaml | 218 +++++++++--------- controllers/flowlogspipeline/flp_test.go | 2 +- 2 files changed, 110 insertions(+), 110 deletions(-) diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index 2022b6fb2..60bcc29c5 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -4971,11 +4971,11 @@ spec: cacheActiveTimeout: default: 5s description: '`cacheActiveTimeout` is the max period during - which the reporter will aggregate flows before sending. - Increasing `cacheMaxFlows` and `cacheActiveTimeout` can - decrease the network traffic overhead and the CPU load, - however you can expect higher memory consumption and an - increased latency in the flow collection.' + which the reporter aggregates flows before sending. Increasing + `cacheMaxFlows` and `cacheActiveTimeout` can decrease the + network traffic overhead and the CPU load, however you can + expect higher memory consumption and an increased latency + in the flow collection.' pattern: ^\d+(ns|ms|s|m)?$ type: string cacheMaxFlows: @@ -5011,7 +5011,7 @@ spec: default: - lo description: '`excludeInterfaces` contains the interface names - that will be excluded from flow tracing. An entry is enclosed + that are excluded from flow tracing. An entry is enclosed by slashes, such as `/br-/`, is matched as a regular expression. 
Otherwise it is matched as a case-sensitive string.' items: @@ -5054,10 +5054,10 @@ spec: type: string interfaces: description: '`interfaces` contains the interface names from - where flows will be collected. If empty, the agent will - fetch all the interfaces in the system, excepting the ones - listed in ExcludeInterfaces. An entry is enclosed by slashes, - such as `/br-/`, is matched as a regular expression. Otherwise + where flows are collected. If empty, the agent fetches all + the interfaces in the system, excepting the ones listed + in ExcludeInterfaces. An entry is enclosed by slashes, such + as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.' items: type: string @@ -5084,12 +5084,12 @@ spec: privileged: description: 'Privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: - in that case, the operator will set granular capabilities - (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, - to enable its correct operation. If for some reason these - capabilities cannot be set, such as if an old kernel version - not knowing CAP_BPF is in use, then you can turn on this - mode for more global privileges.' + in that case, the operator sets granular capabilities (BPF, + PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable + its correct operation. If for some reason these capabilities + cannot be set, such as if an old kernel version not knowing + CAP_BPF is in use, then you can turn on this mode for more + global privileges.' type: boolean resources: default: @@ -5163,13 +5163,13 @@ spec: cacheActiveTimeout: default: 20s description: '`cacheActiveTimeout` is the max period during - which the reporter will aggregate flows before sending' + which the reporter aggregates flows before sending.' pattern: ^\d+(ns|ms|s|m)?$ type: string cacheMaxFlows: default: 400 description: '`cacheMaxFlows` is the max number of flows in - an aggregate; when reached, the reporter sends the flows' + an aggregate; when reached, the reporter sends the flows.' format: int32 minimum: 0 type: integer @@ -5767,8 +5767,8 @@ spec: default: DISABLED description: '`status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `DISABLED` - will not deploy an horizontal pod autoscaler.
- `ENABLED` - will deploy an horizontal pod autoscaler.
' + does not deploy an horizontal pod autoscaler.
- `ENABLED` + deploys an horizontal pod autoscaler.
' enum: - DISABLED - ENABLED @@ -5864,8 +5864,8 @@ spec: example, `filter: {"src_namespace": "namespace1,namespace2"}`.' type: object name: - description: Name of the filter, that will be displayed - in Console + description: Name of the filter, that is displayed in the + Console type: string required: - filter @@ -6013,11 +6013,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing the file. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. + containing the file. If omitted, the default is + to use the same namespace as where NetObserv is + deployed. If the namespace is different, the config + map or the secret is copied so that it can be + mounted as required. type: string type: description: 'Type for the file reference: "configmap" @@ -6042,11 +6042,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing the file. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. + containing the file. If omitted, the default is + to use the same namespace as where NetObserv is + deployed. If the namespace is different, the config + map or the secret is copied so that it can be + mounted as required. type: string type: description: 'Type for the file reference: "configmap" @@ -6092,11 +6092,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing certificates. If omitted, assumes the - same namespace as where NetObserv is deployed. - If the namespace is different, the config map - or the secret will be copied so that it can be - mounted as required. + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it + can be mounted as required. type: string type: description: 'Type for the certificate reference: @@ -6138,11 +6138,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing certificates. If omitted, assumes the - same namespace as where NetObserv is deployed. - If the namespace is different, the config map - or the secret will be copied so that it can be - mounted as required. + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it + can be mounted as required. type: string type: description: 'Type for the certificate reference: @@ -6156,7 +6156,7 @@ spec: topic: default: "" description: Kafka topic to use. It must exist, NetObserv - will not create it. + does not create it. type: string required: - address @@ -6199,10 +6199,10 @@ spec: namespace: default: "" description: Namespace of the config map or secret containing - the file. If omitted, assumes same namespace as where - NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. + the file. If omitted, the default is to use the same + namespace as where NetObserv is deployed. If the namespace + is different, the config map or the secret is copied + so that it can be mounted as required. 
type: string type: description: 'Type for the file reference: "configmap" @@ -6226,10 +6226,10 @@ spec: namespace: default: "" description: Namespace of the config map or secret containing - the file. If omitted, assumes same namespace as where - NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. + the file. If omitted, the default is to use the same + namespace as where NetObserv is deployed. If the namespace + is different, the config map or the secret is copied + so that it can be mounted as required. type: string type: description: 'Type for the file reference: "configmap" @@ -6274,10 +6274,10 @@ spec: namespace: default: "" description: Namespace of the config map or secret containing - certificates. If omitted, assumes the same namespace - as where NetObserv is deployed. If the namespace is - different, the config map or the secret will be copied - so that it can be mounted as required. + certificates. If omitted, the default is to use the + same namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + is copied so that it can be mounted as required. type: string type: description: 'Type for the certificate reference: `configmap` @@ -6318,10 +6318,10 @@ spec: namespace: default: "" description: Namespace of the config map or secret containing - certificates. If omitted, assumes the same namespace - as where NetObserv is deployed. If the namespace is - different, the config map or the secret will be copied - so that it can be mounted as required. + certificates. If omitted, the default is to use the + same namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + is copied so that it can be mounted as required. type: string type: description: 'Type for the certificate reference: `configmap` @@ -6334,7 +6334,7 @@ spec: type: object topic: default: "" - description: Kafka topic to use. It must exist, NetObserv will + description: Kafka topic to use. It must exist, NetObserv does not create it. type: string required: @@ -6380,12 +6380,12 @@ spec: authToken: default: DISABLED description: '`authToken` describes the way to get a token - to authenticate to Loki.
- `DISABLED` will not send - any token with the request.
- `FORWARD` will forward - the user token for authorization.
- `HOST` [deprecated - (*)] - will use the local pod service account to authenticate - to Loki.
When using the Loki Operator, this must be - set to `FORWARD`.' + to authenticate to Loki.
- `DISABLED` does not send + any token with the request.
- `FORWARD` forwards the + user token for authorization.
- `HOST` [deprecated (*)] + - uses the local pod service account to authenticate to + Loki.
When using the Loki Operator, this must be set + to `FORWARD`.' enum: - DISABLED - HOST @@ -6401,8 +6401,8 @@ spec: querierUrl: description: '`querierURL` specifies the address of the Loki querier service, in case it is different from the Loki ingester - URL. If empty, the URL value will be used (assuming that - the Loki ingester and querier are in the same server). When + URL. If empty, the URL value is used (assuming that the + Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion and queries use the Loki gateway.' type: string @@ -6429,11 +6429,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing certificates. If omitted, assumes the - same namespace as where NetObserv is deployed. If - the namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it can + be mounted as required. type: string type: description: 'Type for the certificate reference: @@ -6474,11 +6474,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing certificates. If omitted, assumes the - same namespace as where NetObserv is deployed. If - the namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it can + be mounted as required. type: string type: description: 'Type for the certificate reference: @@ -6493,11 +6493,11 @@ spec: description: '`statusURL` specifies the address of the Loki `/ready`, `/metrics` and `/config` endpoints, in case it is different from the Loki querier URL. If empty, the `querierURL` - value will be used. This is useful to show error messages - and some context in the frontend. When using the Loki Operator, + value is used. This is useful to show error messages and + some context in the frontend. When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. `statusTLS` - configuration will be used when `statusUrl` is set.' + configuration is used when `statusUrl` is set.' type: string tenantID: default: netobserv @@ -6529,11 +6529,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing certificates. If omitted, assumes the - same namespace as where NetObserv is deployed. If - the namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it can + be mounted as required. type: string type: description: 'Type for the certificate reference: @@ -6574,11 +6574,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing certificates. If omitted, assumes the - same namespace as where NetObserv is deployed. If - the namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. + containing certificates. 
If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it can + be mounted as required. type: string type: description: 'Type for the certificate reference: @@ -6629,8 +6629,7 @@ spec: type: object namespace: default: netobserv - description: Namespace where NetObserv pods are deployed. If empty, - the namespace of the operator is going to be used. + description: Namespace where NetObserv pods are deployed. type: string processor: description: '`processor` defines the settings of the component that @@ -7232,8 +7231,8 @@ spec: default: DISABLED description: '`status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `DISABLED` - will not deploy an horizontal pod autoscaler.
- `ENABLED` - will deploy an horizontal pod autoscaler.
' + does not deploy an horizontal pod autoscaler.
- `ENABLED` + deploys an horizontal pod autoscaler.
' enum: - DISABLED - ENABLED @@ -7242,7 +7241,7 @@ spec: kafkaConsumerBatchSize: default: 10485760 description: '`kafkaConsumerBatchSize` indicates to the broker - the maximum batch size, in bytes, that the consumer will accept. + the maximum batch size, in bytes, that the consumer accepts. Ignored when not using Kafka. Default: 10MB.' type: integer kafkaConsumerQueueCapacity: @@ -7344,8 +7343,8 @@ spec: insecureSkipVerify: default: false description: insecureSkipVerify allows skipping client-side - verification of the provided certificate If set - to true, ProvidedCaFile field will be ignored + verification of the provided certificate. If set + to true, the `providedCaFile` field is ignored. type: boolean provided: description: TLS configuration when `type` is set @@ -7369,11 +7368,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing certificates. If omitted, assumes - the same namespace as where NetObserv is deployed. - If the namespace is different, the config map - or the secret will be copied so that it can - be mounted as required. + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, + the config map or the secret is copied so that + it can be mounted as required. type: string type: description: 'Type for the certificate reference: @@ -7384,7 +7383,8 @@ spec: type: string type: object providedCaFile: - description: Reference to the CA file will be ignored + description: Reference to the CA file when `type` + is set to `PROVIDED`. properties: file: description: File name within the config map or @@ -7397,11 +7397,11 @@ spec: namespace: default: "" description: Namespace of the config map or secret - containing the file. If omitted, assumes same - namespace as where NetObserv is deployed. If - the namespace is different, the config map or - the secret will be copied so that it can be - mounted as required. + containing the file. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, + the config map or the secret is copied so that + it can be mounted as required. 
type: string type: description: 'Type for the file reference: "configmap" diff --git a/controllers/flowlogspipeline/flp_test.go b/controllers/flowlogspipeline/flp_test.go index f771de096..7a44a600c 100644 --- a/controllers/flowlogspipeline/flp_test.go +++ b/controllers/flowlogspipeline/flp_test.go @@ -715,7 +715,7 @@ func TestConfigMapShouldDeserializeAsJSONWithLokiStack(t *testing.T) { assert.Equal(loki.MaxBackoff.Duration.String(), lokiCfg.MaxBackoff) assert.EqualValues(*loki.MaxRetries, lokiCfg.MaxRetries) assert.EqualValues(loki.BatchSize, lokiCfg.BatchSize) - assert.EqualValues([]string{"SrcK8S_Namespace", "SrcK8S_OwnerName", "DstK8S_Namespace", "DstK8S_OwnerName", "FlowDirection", "_RecordType"}, lokiCfg.Labels) + assert.EqualValues([]string{"SrcK8S_Namespace", "SrcK8S_OwnerName", "SrcK8S_Type", "DstK8S_Namespace", "DstK8S_OwnerName", "DstK8S_Type", "FlowDirection", "Duplicate", "_RecordType"}, lokiCfg.Labels) assert.Equal(`{app="netobserv-flowcollector"}`, fmt.Sprintf("%v", lokiCfg.StaticLabels)) assert.Equal(cfg.Processor.Metrics.Server.Port, int32(decoded.MetricsSettings.Port)) From e50eb53126cddd0e83f5932d6b8ef0e9050a3f15 Mon Sep 17 00:00:00 2001 From: Julien Pinsonneau Date: Mon, 2 Oct 2023 11:50:40 +0200 Subject: [PATCH 11/17] fix webhooks conversions --- api/v1alpha1/flowcollector_webhook.go | 54 +++++++++++++-------------- api/v1beta1/flowcollector_webhook.go | 39 +++++++++---------- 2 files changed, 44 insertions(+), 49 deletions(-) diff --git a/api/v1alpha1/flowcollector_webhook.go b/api/v1alpha1/flowcollector_webhook.go index 36564c0fe..5852c2d24 100644 --- a/api/v1alpha1/flowcollector_webhook.go +++ b/api/v1alpha1/flowcollector_webhook.go @@ -41,56 +41,54 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { // Manually restore data. 
 	restored := &v1beta2.FlowCollector{}
 	if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
+		// fallback on current loki config as Manual mode if metadata are not available
+		dst.Spec.Loki.Mode = v1beta2.LokiModeManual
+		dst.Spec.Loki.Manual.IngesterURL = r.Spec.Loki.URL
+		dst.Spec.Loki.Manual.QuerierURL = r.Spec.Loki.QuerierURL
+		dst.Spec.Loki.Manual.StatusURL = r.Spec.Loki.StatusURL
+		dst.Spec.Loki.Manual.TenantID = r.Spec.Loki.TenantID
+		dst.Spec.Loki.Manual.AuthToken = r.Spec.Loki.AuthToken
+		if err := Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(&r.Spec.Loki.TLS, &dst.Spec.Loki.Manual.TLS, nil); err != nil {
+			return fmt.Errorf("copying v1alpha1.Loki.TLS into v1beta2.Loki.Manual.TLS: %w", err)
+		}
 		return err
 	}
-	dst.Spec.Processor.LogTypes = restored.Spec.Processor.LogTypes
+	// Agent
+	if restored.Spec.Agent.EBPF.Features != nil {
+		dst.Spec.Agent.EBPF.Features = make([]v1beta2.AgentFeature, len(restored.Spec.Agent.EBPF.Features))
+		copy(dst.Spec.Agent.EBPF.Features, restored.Spec.Agent.EBPF.Features)
+	}
+	// Processor
+	dst.Spec.Processor.LogTypes = restored.Spec.Processor.LogTypes
 	if restored.Spec.Processor.ConversationHeartbeatInterval != nil {
 		dst.Spec.Processor.ConversationHeartbeatInterval = restored.Spec.Processor.ConversationHeartbeatInterval
 	}
-
 	if restored.Spec.Processor.ConversationEndTimeout != nil {
 		dst.Spec.Processor.ConversationEndTimeout = restored.Spec.Processor.ConversationEndTimeout
 	}
-
 	if restored.Spec.Processor.ConversationTerminatingTimeout != nil {
 		dst.Spec.Processor.ConversationTerminatingTimeout = restored.Spec.Processor.ConversationTerminatingTimeout
 	}
-
 	if restored.Spec.Processor.Metrics.DisableAlerts != nil {
 		dst.Spec.Processor.Metrics.DisableAlerts = restored.Spec.Processor.Metrics.DisableAlerts
 	}
+	dst.Spec.Processor.Metrics.Server.TLS.InsecureSkipVerify = restored.Spec.Processor.Metrics.Server.TLS.InsecureSkipVerify
+	dst.Spec.Processor.Metrics.Server.TLS.ProvidedCaFile = restored.Spec.Processor.Metrics.Server.TLS.ProvidedCaFile
-	dst.Spec.Loki.Enable = restored.Spec.Loki.Enable
-
-	if restored.Spec.Agent.EBPF.Features != nil {
-		dst.Spec.Agent.EBPF.Features = make([]v1beta2.AgentFeature, len(restored.Spec.Agent.EBPF.Features))
-		copy(dst.Spec.Agent.EBPF.Features, restored.Spec.Agent.EBPF.Features)
-	}
-
+	// Kafka
 	dst.Spec.Kafka.SASL = restored.Spec.Kafka.SASL
-	// restore loki configuration from metadata
-	if len(dst.Spec.Loki.Mode) > 0 {
-		dst.Spec.Loki.Mode = restored.Spec.Loki.Mode
-		dst.Spec.Loki.Manual = restored.Spec.Loki.Manual
+	// Loki
+	dst.Spec.Loki.Enable = restored.Spec.Loki.Enable
+	dst.Spec.Loki.Mode = restored.Spec.Loki.Mode
+	dst.Spec.Loki.Manual = restored.Spec.Loki.Manual
+	if restored.Spec.Loki.LokiStack != nil {
 		dst.Spec.Loki.LokiStack = restored.Spec.Loki.LokiStack
-	} else {
-		// fallback on previous Manual mode
-		dst.Spec.Loki.Mode = v1beta2.LokiModeManual
-		dst.Spec.Loki.Manual.IngesterURL = r.Spec.Loki.URL
-		dst.Spec.Loki.Manual.QuerierURL = r.Spec.Loki.QuerierURL
-		dst.Spec.Loki.Manual.StatusURL = r.Spec.Loki.StatusURL
-		dst.Spec.Loki.Manual.TenantID = r.Spec.Loki.TenantID
-		dst.Spec.Loki.Manual.AuthToken = r.Spec.Loki.AuthToken
-		if err := Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(&r.Spec.Loki.TLS, &dst.Spec.Loki.Manual.TLS, nil); err != nil {
-			return fmt.Errorf("copying v1alplha1.Loki.TLS into v1beta2.Loki.Manual.TLS: %w", err)
-		}
 	}
-	dst.Spec.Processor.Metrics.Server.TLS.InsecureSkipVerify = restored.Spec.Processor.Metrics.Server.TLS.InsecureSkipVerify
-	dst.Spec.Processor.Metrics.Server.TLS.ProvidedCaFile = restored.Spec.Processor.Metrics.Server.TLS.ProvidedCaFile
+	// Exporters
 	if restored.Spec.Exporters != nil {
 		for _, restoredExp := range restored.Spec.Exporters {
 			if !isExporterIn(restoredExp, dst.Spec.Exporters) {
diff --git a/api/v1beta1/flowcollector_webhook.go b/api/v1beta1/flowcollector_webhook.go
index de4b16da1..8679568d8 100644
--- a/api/v1beta1/flowcollector_webhook.go
+++ b/api/v1beta1/flowcollector_webhook.go
@@ -41,42 +41,39 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error {
 	// Manually restore data.
 	restored := &v1beta2.FlowCollector{}
 	if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
+		// fallback on current loki config as Manual mode if metadata are not available
+		dst.Spec.Loki.Mode = v1beta2.LokiModeManual
+		dst.Spec.Loki.Manual.IngesterURL = r.Spec.Loki.URL
+		dst.Spec.Loki.Manual.QuerierURL = r.Spec.Loki.QuerierURL
+		dst.Spec.Loki.Manual.StatusURL = r.Spec.Loki.StatusURL
+		dst.Spec.Loki.Manual.TenantID = r.Spec.Loki.TenantID
+		dst.Spec.Loki.Manual.AuthToken = r.Spec.Loki.AuthToken
+		if err := Convert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(&r.Spec.Loki.TLS, &dst.Spec.Loki.Manual.TLS, nil); err != nil {
+			return fmt.Errorf("copying v1beta1.Loki.TLS into v1beta2.Loki.Manual.TLS: %w", err)
+		}
+		if err := Convert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(&r.Spec.Loki.StatusTLS, &dst.Spec.Loki.Manual.StatusTLS, nil); err != nil {
+			return fmt.Errorf("copying v1beta1.Loki.StatusTLS into v1beta2.Loki.Manual.StatusTLS: %w", err)
+		}
 		return err
 	}
+	// Processor
 	dst.Spec.Processor.LogTypes = restored.Spec.Processor.LogTypes
-
 	if restored.Spec.Processor.ConversationHeartbeatInterval != nil {
 		dst.Spec.Processor.ConversationHeartbeatInterval = restored.Spec.Processor.ConversationHeartbeatInterval
 	}
-
 	if restored.Spec.Processor.ConversationEndTimeout != nil {
 		dst.Spec.Processor.ConversationEndTimeout = restored.Spec.Processor.ConversationEndTimeout
 	}
-
 	if restored.Spec.Processor.Metrics.DisableAlerts != nil {
 		dst.Spec.Processor.Metrics.DisableAlerts = restored.Spec.Processor.Metrics.DisableAlerts
 	}
-	// restore loki configuration from metadata
-	if len(dst.Spec.Loki.Mode) > 0 {
-		dst.Spec.Loki.Mode = restored.Spec.Loki.Mode
-		dst.Spec.Loki.Manual = restored.Spec.Loki.Manual
+	// Loki
+	dst.Spec.Loki.Mode = restored.Spec.Loki.Mode
+	dst.Spec.Loki.Manual = restored.Spec.Loki.Manual
+	if restored.Spec.Loki.LokiStack != nil {
 		dst.Spec.Loki.LokiStack = restored.Spec.Loki.LokiStack
-	} else {
-		// fallback on previous Manual mode
-		dst.Spec.Loki.Mode = v1beta2.LokiModeManual
-		dst.Spec.Loki.Manual.IngesterURL = r.Spec.Loki.URL
-		dst.Spec.Loki.Manual.QuerierURL = r.Spec.Loki.QuerierURL
-		dst.Spec.Loki.Manual.StatusURL = r.Spec.Loki.StatusURL
-		dst.Spec.Loki.Manual.TenantID = r.Spec.Loki.TenantID
-		dst.Spec.Loki.Manual.AuthToken = r.Spec.Loki.AuthToken
-		if err := Convert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(&r.Spec.Loki.TLS, &dst.Spec.Loki.Manual.TLS, nil); err != nil {
-			return fmt.Errorf("copying v1beta1.Loki.TLS into v1beta2.Loki.Manual.TLS: %w", err)
-		}
-		if err := Convert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(&r.Spec.Loki.StatusTLS, &dst.Spec.Loki.Manual.StatusTLS, nil); err != nil {
-			return fmt.Errorf("copying v1beta1.Loki.StatusTLS into v1beta2.Loki.Manual.StatusTLS: %w", err)
-		}
 	}
 
 	return nil

From 656cb0fb4fd5982bad0d6f0110a2eb1bb4f2e898 Mon Sep 17 00:00:00 2001
From: Julien Pinsonneau
Date: Tue, 3 Oct 2023 16:39:16 +0200
Subject: [PATCH 12/17] addressed feedback

---
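Notes: a minimal round-trip sketch of the fallback behavior these conversions
implement (hypothetical test scaffolding, not part of the patch; it assumes
testify assertions as in flp_test.go plus imports of the testing, v1alpha1 and
v1beta2 packages; the field names come from the diffs above):

func TestNoConversionAnnotationFallsBackToManualMode(t *testing.T) {
	src := &v1alpha1.FlowCollector{}
	src.Spec.Loki.URL = "http://loki:3100/"
	src.Spec.Loki.TenantID = "netobserv"

	// src carries no stashed v1beta2 annotation, so UnmarshalData reports !ok
	// and ConvertTo takes the Manual-mode fallback path.
	dst := &v1beta2.FlowCollector{}
	assert.NoError(t, src.ConvertTo(dst))

	assert.Equal(t, v1beta2.LokiModeManual, dst.Spec.Loki.Mode)
	assert.Equal(t, "http://loki:3100/", dst.Spec.Loki.Manual.IngesterURL)
	assert.Equal(t, "netobserv", dst.Spec.Loki.Manual.TenantID)
}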
 api/v1alpha1/flowcollector_types.go                     | 1 -
 api/v1alpha1/zz_generated.conversion.go                 | 8 ++++++--
 api/v1beta1/flowcollector_types.go                      | 3 +--
 api/v1beta1/zz_generated.conversion.go                  | 8 ++++++--
 api/v1beta2/flowcollector_types.go                      | 3 ---
 bundle/manifests/flows.netobserv.io_flowcollectors.yaml | 4 ++--
 config/crd/bases/flows.netobserv.io_flowcollectors.yaml | 4 ++--
 7 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/api/v1alpha1/flowcollector_types.go b/api/v1alpha1/flowcollector_types.go
index dc9ae8aaa..36613224c 100644
--- a/api/v1alpha1/flowcollector_types.go
+++ b/api/v1alpha1/flowcollector_types.go
@@ -55,7 +55,6 @@ type FlowCollectorSpec struct {
 	Processor FlowCollectorFLP `json:"processor,omitempty"`
 
 	// loki, the flow store, client settings.
-	// +k8s:conversion-gen=false
 	Loki FlowCollectorLoki `json:"loki,omitempty"`
 
 	// consolePlugin defines the settings related to the OpenShift Console plugin, when available.
diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go
index 2cab7eca7..1187cde8a 100644
--- a/api/v1alpha1/zz_generated.conversion.go
+++ b/api/v1alpha1/zz_generated.conversion.go
@@ -860,7 +860,9 @@ func autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *Flo
 	if err := Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil {
 		return err
 	}
-	// INFO: in.Loki opted out of conversion generation
+	if err := Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(&in.Loki, &out.Loki, s); err != nil {
+		return err
+	}
 	if err := Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil {
 		return err
 	}
@@ -885,7 +887,9 @@ func autoConvert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1b
 	if err := Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil {
 		return err
 	}
-	// INFO: in.Loki opted out of conversion generation
+	if err := Convert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(&in.Loki, &out.Loki, s); err != nil {
+		return err
+	}
 	if err := Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil {
 		return err
 	}
diff --git a/api/v1beta1/flowcollector_types.go b/api/v1beta1/flowcollector_types.go
index 8817ded47..f03dc0765 100644
--- a/api/v1beta1/flowcollector_types.go
+++ b/api/v1beta1/flowcollector_types.go
@@ -58,7 +58,6 @@ type FlowCollectorSpec struct {
 	Processor FlowCollectorFLP `json:"processor,omitempty"`
 
 	// loki, the flow store, client settings.
-	// +k8s:conversion-gen=false
 	Loki FlowCollectorLoki `json:"loki,omitempty"`
 
 	// `consolePlugin` defines the settings related to the OpenShift Console plugin, when available.
@@ -849,7 +848,7 @@ type FlowCollectorStatus struct {
 // +kubebuilder:printcolumn:name="Sampling (EBPF)",type="string",JSONPath=`.spec.agent.ebpf.sampling`
 // +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel`
 // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[*].reason"
-
+// +kubebuilder:storageversion
 // `FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments.
 type FlowCollector struct {
 	metav1.TypeMeta `json:",inline"`
diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go
index a7a1bc5de..95af7b562 100644
--- a/api/v1beta1/zz_generated.conversion.go
+++ b/api/v1beta1/zz_generated.conversion.go
@@ -927,7 +927,9 @@ func autoConvert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *Flow
 	if err := Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil {
 		return err
 	}
-	// INFO: in.Loki opted out of conversion generation
+	if err := Convert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(&in.Loki, &out.Loki, s); err != nil {
+		return err
+	}
 	if err := Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil {
 		return err
 	}
@@ -952,7 +954,9 @@ func autoConvert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in *v1be
 	if err := Convert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil {
 		return err
 	}
-	// INFO: in.Loki opted out of conversion generation
+	if err := Convert_v1beta2_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(&in.Loki, &out.Loki, s); err != nil {
+		return err
+	}
 	if err := Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil {
 		return err
 	}
diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go
index dbf42572a..3a7e019d8 100644
--- a/api/v1beta2/flowcollector_types.go
+++ b/api/v1beta2/flowcollector_types.go
@@ -58,7 +58,6 @@ type FlowCollectorSpec struct {
 	Processor FlowCollectorFLP `json:"processor,omitempty"`
 
 	// loki, the flow store, client settings.
-	// +k8s:conversion-gen=false
 	Loki FlowCollectorLoki `json:"loki,omitempty"`
 
 	// `consolePlugin` defines the settings related to the OpenShift Console plugin, when available.
@@ -879,8 +878,6 @@ type FlowCollectorStatus struct {
 // +kubebuilder:printcolumn:name="Sampling (EBPF)",type="string",JSONPath=`.spec.agent.ebpf.sampling`
 // +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel`
 // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[*].reason"
-// +kubebuilder:storageversion
-
 // `FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments.
 type FlowCollector struct {
 	metav1.TypeMeta `json:",inline"`
diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
index 60bcc29c5..911d5f3f9 100644
--- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
+++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
@@ -4918,7 +4918,7 @@ spec:
             type: object
         type: object
     served: true
-    storage: false
+    storage: true
    subresources:
      status: {}
  - additionalPrinterColumns:
@@ -7584,7 +7584,7 @@ spec:
             type: object
         type: object
     served: true
-    storage: true
+    storage: false
    subresources:
      status: {}
status:
diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
index 0ed293abc..1d51c153b 100644
--- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
+++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
@@ -4904,7 +4904,7 @@ spec:
             type: object
         type: object
     served: true
-    storage: false
+    storage: true
    subresources:
      status: {}
  - additionalPrinterColumns:
@@ -7570,7 +7570,7 @@ spec:
             type: object
         type: object
     served: true
-    storage: true
+    storage: false
    subresources:
      status: {}
status:

From c2d87fbe653d1534272ca3b250bd35f34780025d Mon Sep 17 00:00:00 2001
From: Julien Pinsonneau
Date: Wed, 4 Oct 2023 11:28:49 +0200
Subject: [PATCH 13/17] remove v1beta2 from autogen

---
 Makefile | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index 458c72ef6..ad6fbdc40 100644
--- a/Makefile
+++ b/Makefile
@@ -252,11 +252,10 @@ doc: crdoc ## Generate markdown documentation
 	$(CRDOC) --resources config/crd/bases/flows.netobserv.io_flowcollectors.yaml --output docs/FlowCollector.md
 
 generate-go-conversions: $(CONVERSION_GEN) ## Run all generate-go-conversions
-	$(MAKE) clean-generated-conversions SRC_DIRS="./api/v1alpha1 ./api/v1beta1 ./api/v1beta2"
+	$(MAKE) clean-generated-conversions SRC_DIRS="./api/v1alpha1 ./api/v1beta1"
 	$(CONVERSION_GEN) \
 		--input-dirs=./api/v1alpha1 \
 		--input-dirs=./api/v1beta1 \
-		--input-dirs=./api/v1beta2 \
 		--build-tag=ignore_autogenerated_core \
 		--output-file-base=zz_generated.conversion \
 		$(CONVERSION_GEN_OUTPUT_BASE) \

From 282b28ff4a20fdc7242a1903978b0cb098100565 Mon Sep 17 00:00:00 2001
From: Julien Pinsonneau
Date: Thu, 5 Oct 2023 16:16:14 +0200
Subject: [PATCH 14/17] fix tests & misc + deprecate v1alpha1

---
 Makefile                                      |    2 +-
 api/v1alpha1/doc.go                           |    2 +
 api/v1alpha1/flowcollector_types.go           |    6 +-
 .../flows.netobserv.io_flowcollectors.yaml    |    5 +-
 .../flows.netobserv.io_flowcollectors.yaml    |    5 +-
 .../flows.netobserv.io_flowcollectors.yaml    | 7582 +++++++++++++++++
 controllers/flowcollector_controller_test.go  |    2 +
 controllers/suite_test.go                     |   25 +-
 docs/FlowCollector.md                         |    3 +-
 main.go                                       |    4 +-
 pkg/cleanup/cleanup_test.go                   |    4 +-
 11 files changed, 7617 insertions(+), 23 deletions(-)
 create mode 100644 config/crd/hack/flows.netobserv.io_flowcollectors.yaml

diff --git a/Makefile b/Makefile
index ad6fbdc40..1e6e9e54f 100644
--- a/Makefile
+++ b/Makefile
@@ -252,7 +252,7 @@ doc: crdoc ## Generate markdown documentation
 	$(CRDOC) --resources config/crd/bases/flows.netobserv.io_flowcollectors.yaml --output docs/FlowCollector.md
 
 generate-go-conversions: $(CONVERSION_GEN) ## Run all generate-go-conversions
-	$(MAKE) clean-generated-conversions SRC_DIRS="./api/v1alpha1 ./api/v1beta1"
+	$(MAKE) clean-generated-conversions SRC_DIRS="./api/v1alpha1,./api/v1beta1"
 	$(CONVERSION_GEN) \
 		--input-dirs=./api/v1alpha1 \
 		--input-dirs=./api/v1beta1 \
diff --git a/api/v1alpha1/doc.go b/api/v1alpha1/doc.go
index c0105103f..8e7d3f467 100644
--- a/api/v1alpha1/doc.go
+++ b/api/v1alpha1/doc.go
@@ -13,4 +13,6 @@ limitations under the License.
 
 // Package v1aplha1 contains the v1alpha1 API implementation.
 // +k8s:conversion-gen=github.com/netobserv/network-observability-operator/api/v1beta2
+//
+// Deprecated: This package will be removed in one of the next releases.
 package v1alpha1
diff --git a/api/v1alpha1/flowcollector_types.go b/api/v1alpha1/flowcollector_types.go
index 36613224c..75050d0f4 100644
--- a/api/v1alpha1/flowcollector_types.go
+++ b/api/v1alpha1/flowcollector_types.go
@@ -672,6 +672,8 @@ type FlowCollectorStatus struct {
 // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[*].reason"
 
 // FlowCollector is the Schema for the flowcollectors API, which pilots and configures netflow collection.
+//
+// Deprecated: This package will be removed in one of the next releases.
 type FlowCollector struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -680,9 +682,11 @@ type FlowCollector struct {
 	Status FlowCollectorStatus `json:"status,omitempty"`
 }
 
-//+kubebuilder:object:root=true
+// +kubebuilder:object:root=true
 
 // FlowCollectorList contains a list of FlowCollector
+//
+// Deprecated: This package will be removed in one of the next releases.
 type FlowCollectorList struct {
 	metav1.TypeMeta `json:",inline"`
 	metav1.ListMeta `json:"metadata,omitempty"`
diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
index 911d5f3f9..1584f99ac 100644
--- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
+++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
@@ -42,8 +42,9 @@ spec:
     name: v1alpha1
     schema:
       openAPIV3Schema:
-        description: FlowCollector is the Schema for the flowcollectors API, which
-          pilots and configures netflow collection.
+        description: "FlowCollector is the Schema for the flowcollectors API, which
+          pilots and configures netflow collection. \n Deprecated: This package will
+          be removed in one of the next releases."
         properties:
           apiVersion:
             description: 'APIVersion defines the versioned schema of this representation
diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
index 1d51c153b..4a8e7c073 100644
--- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
+++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
@@ -32,8 +32,9 @@ spec:
     name: v1alpha1
     schema:
       openAPIV3Schema:
-        description: FlowCollector is the Schema for the flowcollectors API, which
-          pilots and configures netflow collection.
+        description: "FlowCollector is the Schema for the flowcollectors API, which
+          pilots and configures netflow collection. \n Deprecated: This package will
+          be removed in one of the next releases."
properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation diff --git a/config/crd/hack/flows.netobserv.io_flowcollectors.yaml b/config/crd/hack/flows.netobserv.io_flowcollectors.yaml new file mode 100644 index 000000000..0edbd9e3a --- /dev/null +++ b/config/crd/hack/flows.netobserv.io_flowcollectors.yaml @@ -0,0 +1,7582 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: flowcollectors.flows.netobserv.io +spec: + group: flows.netobserv.io + names: + kind: FlowCollector + listKind: FlowCollectorList + plural: flowcollectors + singular: flowcollector + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.agent.type + name: Agent + type: string + - jsonPath: .spec.agent.ebpf.sampling + name: Sampling (EBPF) + type: string + - jsonPath: .spec.deploymentModel + name: Deployment Model + type: string + - jsonPath: .status.conditions[*].reason + name: Status + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: "FlowCollector is the Schema for the flowcollectors API, which + pilots and configures netflow collection. \n Deprecated: This package will + be removed in one of the next releases." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FlowCollectorSpec defines the desired state of FlowCollector + properties: + agent: + default: + type: EBPF + description: agent for flows extraction. + properties: + ebpf: + description: ebpf describes the settings related to the eBPF-based + flow reporter when the "agent.type" property is set to "EBPF". + properties: + cacheActiveTimeout: + default: 5s + description: cacheActiveTimeout is the max period during which + the reporter will aggregate flows before sending. Increasing + `cacheMaxFlows` and `cacheActiveTimeout` can decrease the + network traffic overhead and the CPU load, however you can + expect higher memory consumption and an increased latency + in the flow collection. + pattern: ^\d+(ns|ms|s|m)?$ + type: string + cacheMaxFlows: + default: 100000 + description: cacheMaxFlows is the max number of flows in an + aggregate; when reached, the reporter sends the flows. Increasing + `cacheMaxFlows` and `cacheActiveTimeout` can decrease the + network traffic overhead and the CPU load, however you can + expect higher memory consumption and an increased latency + in the flow collection. + format: int32 + minimum: 1 + type: integer + debug: + description: Debug allows setting some aspects of the internal + configuration of the eBPF agent. This section is aimed exclusively + for debugging and fine-grained performance optimizations + (for example GOGC, GOMAXPROCS env vars). Users setting its + values do it at their own risk. 
+ properties: + env: + additionalProperties: + type: string + description: env allows passing custom environment variables + to the NetObserv Agent. Useful for passing some very + concrete performance-tuning options (such as GOGC, GOMAXPROCS) + that shouldn't be publicly exposed as part of the FlowCollector + descriptor, as they are only useful in edge debug and + support scenarios. + type: object + type: object + excludeInterfaces: + default: + - lo + description: excludeInterfaces contains the interface names + that will be excluded from flow tracing. If an entry is + enclosed by slashes (such as `/br-/`), it will match as + regular expression, otherwise it will be matched as a case-sensitive + string. + items: + type: string + type: array + imagePullPolicy: + default: IfNotPresent + description: imagePullPolicy is the Kubernetes pull policy + for the image defined above + enum: + - IfNotPresent + - Always + - Never + type: string + interfaces: + description: interfaces contains the interface names from + where flows will be collected. If empty, the agent will + fetch all the interfaces in the system, excepting the ones + listed in ExcludeInterfaces. If an entry is enclosed by + slashes (such as `/br-/`), it will match as regular expression, + otherwise it will be matched as a case-sensitive string. + items: + type: string + type: array + kafkaBatchSize: + default: 10485760 + description: 'kafkaBatchSize limits the maximum size of a + request in bytes before being sent to a partition. Ignored + when not using Kafka. Default: 10MB.' + type: integer + logLevel: + default: info + description: logLevel defines the log level for the NetObserv + eBPF Agent + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + privileged: + description: 'privileged mode for the eBPF Agent container. + In general this setting can be ignored or set to false: + in that case, the operator will set granular capabilities + (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, + to enable its correct operation. If for some reason these + capabilities cannot be set (for example old kernel version + not knowing CAP_BPF) then you can turn on this mode for + more global privileges.' + type: boolean + resources: + default: + limits: + memory: 800Mi + requests: + cpu: 100m + memory: 50Mi + description: 'resources are the compute resources required + by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. \n This field + is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + sampling: + default: 50 + description: sampling rate of the flow reporter. 100 means + one flow on 100 is sent. 0 or 1 means all flows are sampled. + format: int32 + minimum: 0 + type: integer + type: object + ipfix: + description: ipfix describes the settings related to the IPFIX-based + flow reporter when the "agent.type" property is set to "IPFIX". + properties: + cacheActiveTimeout: + default: 20s + description: cacheActiveTimeout is the max period during which + the reporter will aggregate flows before sending + pattern: ^\d+(ns|ms|s|m)?$ + type: string + cacheMaxFlows: + default: 400 + description: cacheMaxFlows is the max number of flows in an + aggregate; when reached, the reporter sends the flows + format: int32 + minimum: 0 + type: integer + clusterNetworkOperator: + description: clusterNetworkOperator defines the settings related + to the OpenShift Cluster Network Operator, when available. + properties: + namespace: + default: openshift-network-operator + description: namespace where the config map is going + to be deployed. + type: string + type: object + forceSampleAll: + default: false + description: forceSampleAll allows disabling sampling in the + IPFIX-based flow reporter. It is not recommended to sample + all the traffic with IPFIX, as it might generate cluster + instability. If you REALLY want to do that, set this flag + to true. Use at your own risk. When it is set to true, the + value of "sampling" is ignored. + type: boolean + ovnKubernetes: + description: ovnKubernetes defines the settings of the OVN-Kubernetes + CNI, when available. This configuration is used when using + OVN's IPFIX exports, without OpenShift. When using OpenShift, + refer to the `clusterNetworkOperator` property instead. + properties: + containerName: + default: ovnkube-node + description: containerName defines the name of the container + to configure for IPFIX. + type: string + daemonSetName: + default: ovnkube-node + description: daemonSetName defines the name of the DaemonSet + controlling the OVN-Kubernetes pods. + type: string + namespace: + default: ovn-kubernetes + description: namespace where OVN-Kubernetes pods are deployed. + type: string + type: object + sampling: + default: 400 + description: sampling is the sampling rate on the reporter. + 100 means one flow on 100 is sent. 
To ensure cluster stability, + it is not possible to set a value below 2. If you really + want to sample every packet, which might impact the cluster + stability, refer to "forceSampleAll". Alternatively, you + can use the eBPF Agent instead of IPFIX. + format: int32 + minimum: 2 + type: integer + type: object + type: + default: EBPF + description: type selects the flows tracing agent. Possible values + are "EBPF" (default) to use NetObserv eBPF agent, "IPFIX" to + use the legacy IPFIX collector. "EBPF" is recommended in most + cases as it offers better performances and should work regardless + of the CNI installed on the cluster. "IPFIX" works with OVN-Kubernetes + CNI (other CNIs could work if they support exporting IPFIX, + but they would require manual configuration). + enum: + - EBPF + - IPFIX + type: string + required: + - type + type: object + consolePlugin: + description: consolePlugin defines the settings related to the OpenShift + Console plugin, when available. + properties: + autoscaler: + description: autoscaler spec of a horizontal pod autoscaler to + set up for the plugin Deployment. + properties: + maxReplicas: + default: 3 + description: maxReplicas is the upper limit for the number + of pods that can be set by the autoscaler; cannot be smaller + than MinReplicas. + format: int32 + type: integer + metrics: + description: metrics used by the pod autoscaler + items: + description: MetricSpec specifies how to scale based on + a single metric (only `type` and one other matching field + should be set at once). + properties: + containerResource: + description: containerResource refers to a resource + metric (such as those specified in requests and limits) + known to Kubernetes describing a single container + in each pod of the current scale target (e.g. CPU + or memory). Such metrics are built in to Kubernetes, + and have special scaling options on top of those available + to normal per-pod metrics using the "pods" source. + This is an alpha feature and can be enabled by the + HPAContainerMetrics feature flag. + properties: + container: + description: container is the name of the container + in the pods of the scaling target + type: string + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - container + - name + - target + type: object + external: + description: external refers to a global metric that + is not associated with any Kubernetes object. It allows + autoscaling based on information coming from components + running outside of cluster (for example length of + queue in cloud messaging service, or QPS from loadbalancer + running outside of cluster). + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + object: + description: object refers to a metric describing a + single kubernetes object (for example, hits-per-second + on an Ingress object). + properties: + describedObject: + description: describedObject specifies the descriptions + of a object,such as kind,name apiVersion + properties: + apiVersion: + description: apiVersion is the API version of + the referent + type: string + kind: + description: 'kind is the kind of the referent; + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'name is the name of the referent; + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - kind + - name + type: object + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. 
Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - describedObject + - metric + - target + type: object + pods: + description: pods refers to a metric describing each + pod in the current scale target (for example, transactions-processed-per-second). The + values will be averaged together before being compared + to the target value. + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. 
Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + resource: + description: resource refers to a resource metric (such + as those specified in requests and limits) known to + Kubernetes describing each pod in the current scale + target (e.g. CPU or memory). Such metrics are built + in to Kubernetes, and have special scaling options + on top of those available to normal per-pod metrics + using the "pods" source. + properties: + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - name + - target + type: object + type: + description: 'type is the type of metric source. It + should be one of "ContainerResource", "External", + "Object", "Pods" or "Resource", each mapping to a + matching field in the object. Note: "ContainerResource" + type is available on when the feature-gate HPAContainerMetrics + is enabled' + type: string + required: + - type + type: object + type: array + minReplicas: + description: minReplicas is the lower limit for the number + of replicas to which the autoscaler can scale down. It + defaults to 1 pod. minReplicas is allowed to be 0 if the + alpha feature gate HPAScaleToZero is enabled and at least + one Object or External metric is configured. Scaling is + active as long as at least one metric value is available. 
+ format: int32 + type: integer + status: + default: DISABLED + description: Status describe the desired status regarding + deploying an horizontal pod autoscaler DISABLED will not + deploy an horizontal pod autoscaler ENABLED will deploy + an horizontal pod autoscaler + enum: + - DISABLED + - ENABLED + type: string + type: object + imagePullPolicy: + default: IfNotPresent + description: imagePullPolicy is the Kubernetes pull policy for + the image defined above + enum: + - IfNotPresent + - Always + - Never + type: string + logLevel: + default: info + description: logLevel for the console plugin backend + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + port: + default: 9001 + description: port is the plugin service port + format: int32 + maximum: 65535 + minimum: 1 + type: integer + portNaming: + default: + enable: true + description: portNaming defines the configuration of the port-to-service + name translation + properties: + enable: + default: true + description: enable the console plugin port-to-service name + translation + type: boolean + portNames: + additionalProperties: + type: string + description: 'portNames defines additional port names to use + in the console. Example: portNames: {"3100": "loki"}' + type: object + type: object + quickFilters: + default: + - default: true + filter: + dst_namespace!: openshift-,netobserv + src_namespace!: openshift-,netobserv + name: Applications + - filter: + dst_namespace: openshift-,netobserv + src_namespace: openshift-,netobserv + name: Infrastructure + - default: true + filter: + dst_kind: Pod + src_kind: Pod + name: Pods network + - filter: + dst_kind: Service + name: Services network + description: quickFilters configures quick filter presets for + the Console plugin + items: + description: QuickFilter defines preset configuration for Console's + quick filters + properties: + default: + description: default defines whether this filter should + be active by default or not + type: boolean + filter: + additionalProperties: + type: string + description: 'filter is a set of keys and values to be set + when this filter is selected. Each key can relate to a + list of values using a coma-separated string. Example: + filter: {"src_namespace": "namespace1,namespace2"}' + type: object + name: + description: name of the filter, that will be displayed + in Console + type: string + required: + - filter + - name + type: object + type: array + register: + default: true + description: 'register allows, when set to true, to automatically + register the provided console plugin with the OpenShift Console + operator. When set to false, you can still register it manually + by editing console.operator.openshift.io/cluster. E.g: oc patch + console.operator.openshift.io cluster --type=''json'' -p ''[{"op": + "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''' + type: boolean + replicas: + default: 1 + description: replicas defines the number of replicas (pods) to + start. + format: int32 + minimum: 0 + type: integer + resources: + default: + limits: + memory: 100Mi + requests: + cpu: 100m + memory: 50Mi + description: 'resources, in terms of compute resources, required + by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. 
+ \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + required: + - register + type: object + deploymentModel: + default: DIRECT + description: deploymentModel defines the desired type of deployment + for flow processing. Possible values are "DIRECT" (default) to make + the flow processor listening directly from the agents, or "KAFKA" + to make flows sent to a Kafka pipeline before consumption by the + processor. Kafka can provide better scalability, resiliency and + high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka). + enum: + - DIRECT + - KAFKA + type: string + exporters: + description: exporters defines additional optional exporters for custom + consumption or storage. This is an experimental feature. Currently, + only KAFKA exporter is available. + items: + description: FlowCollectorExporter defines an additional exporter + to send enriched flows to + properties: + kafka: + description: kafka configuration, such as address or topic, + to send enriched flows to. + properties: + address: + default: "" + description: address of the Kafka server + type: string + tls: + description: tls client configuration. When using TLS, verify + that the address matches the Kafka port used for TLS, + generally 9093. Note that, when eBPF agents are used, + Kafka certificate needs to be copied in the agent namespace + (by default it's netobserv-privileged). + properties: + caCert: + description: caCert defines the reference of the certificate + for the Certificate Authority + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or + secret. Omit when the key is not necessary. 
+ type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: enable TLS + type: boolean + insecureSkipVerify: + default: false + description: insecureSkipVerify allows skipping client-side + verification of the server certificate If set to true, + CACert field will be ignored + type: boolean + userCert: + description: userCert defines the user certificate reference, + used for mTLS (you can ignore it when using regular, + one-way TLS) + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or + secret. Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: object + topic: + default: "" + description: kafka topic to use. It must exist, NetObserv + will not create it. + type: string + required: + - address + - topic + type: object + type: + description: type selects the type of exporters. Only "KAFKA" + is available at the moment. + enum: + - KAFKA + type: string + required: + - type + type: object + type: array + kafka: + description: kafka configuration, allowing to use Kafka as a broker + as part of the flow collection pipeline. Available when the "spec.deploymentModel" + is "KAFKA". + properties: + address: + default: "" + description: address of the Kafka server + type: string + tls: + description: tls client configuration. When using TLS, verify + that the address matches the Kafka port used for TLS, generally + 9093. Note that, when eBPF agents are used, Kafka certificate + needs to be copied in the agent namespace (by default it's netobserv-privileged). + properties: + caCert: + description: caCert defines the reference of the certificate + for the Certificate Authority + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. 
If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string + type: + description: 'type for the certificate reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: enable TLS + type: boolean + insecureSkipVerify: + default: false + description: insecureSkipVerify allows skipping client-side + verification of the server certificate If set to true, CACert + field will be ignored + type: boolean + userCert: + description: userCert defines the user certificate reference, + used for mTLS (you can ignore it when using regular, one-way + TLS) + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string + type: + description: 'type for the certificate reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: object + topic: + default: "" + description: kafka topic to use. It must exist, NetObserv will + not create it. + type: string + required: + - address + - topic + type: object + loki: + description: loki, the flow store, client settings. + properties: + authToken: + default: DISABLED + description: AuthToken describe the way to get a token to authenticate + to Loki. DISABLED will not send any token with the request. + HOST will use the local pod service account to authenticate + to Loki. FORWARD will forward user token, in this mode, pod + that are not receiving user request like the processor will + use the local pod service account. Similar to HOST mode. When + using the Loki Operator, set it to `HOST` or `FORWARD`. + enum: + - DISABLED + - HOST + - FORWARD + type: string + batchSize: + default: 102400 + description: batchSize is max batch size (in bytes) of logs to + accumulate before sending. + format: int64 + minimum: 1 + type: integer + batchWait: + default: 1s + description: batchWait is max time to wait before sending a batch. + type: string + maxBackoff: + default: 5s + description: maxBackoff is the maximum backoff time for client + connection between retries. + type: string + maxRetries: + default: 2 + description: maxRetries is the maximum number of retries for client + connections. + format: int32 + minimum: 0 + type: integer + minBackoff: + default: 1s + description: minBackoff is the initial backoff time for client + connection between retries. + type: string + querierUrl: + description: querierURL specifies the address of the Loki querier + service, in case it is different from the Loki ingester URL. + If empty, the URL value will be used (assuming that the Loki + ingester and querier are in the same server). When using the + Loki Operator, do not set it, since ingestion and queries use + the Loki gateway. 
+ type: string + staticLabels: + additionalProperties: + type: string + default: + app: netobserv-flowcollector + description: staticLabels is a map of common labels to set on + each flow. + type: object + statusUrl: + description: statusURL specifies the address of the Loki /ready + /metrics /config endpoints, in case it is different from the + Loki querier URL. If empty, the QuerierURL value will be used. + This is useful to show error messages and some context in the + frontend. When using the Loki Operator, set it to the Loki HTTP + query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. + type: string + tenantID: + default: netobserv + description: tenantID is the Loki X-Scope-OrgID that identifies + the tenant for each request. When using the Loki Operator, set + it to `network`, which corresponds to a special tenant mode. + type: string + timeout: + default: 10s + description: timeout is the maximum time connection / request + limit. A Timeout of zero means no timeout. + type: string + tls: + description: tls client configuration. + properties: + caCert: + description: caCert defines the reference of the certificate + for the Certificate Authority + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string + type: + description: 'type for the certificate reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: enable TLS + type: boolean + insecureSkipVerify: + default: false + description: insecureSkipVerify allows skipping client-side + verification of the server certificate If set to true, CACert + field will be ignored + type: boolean + userCert: + description: userCert defines the user certificate reference, + used for mTLS (you can ignore it when using regular, one-way + TLS) + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string + type: + description: 'type for the certificate reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: object + url: + default: http://loki:3100/ + description: url is the address of an existing Loki service to + push the flows to. 
When using the Loki Operator, set it to the + Loki gateway service with the `network` tenant set in path, + for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network. + type: string + type: object + namespace: + description: namespace where NetObserv pods are deployed. If empty, + the namespace of the operator is going to be used. + type: string + processor: + description: processor defines the settings of the component that + receives the flows from the agent, enriches them, and forwards them + to the Loki persistence layer. + properties: + debug: + description: Debug allows setting some aspects of the internal + configuration of the flow processor. This section is aimed exclusively + for debugging and fine-grained performance optimizations (for + example GOGC, GOMAXPROCS env vars). Users setting its values + do it at their own risk. + properties: + env: + additionalProperties: + type: string + description: env allows passing custom environment variables + to the NetObserv Agent. Useful for passing some very concrete + performance-tuning options (such as GOGC, GOMAXPROCS) that + shouldn't be publicly exposed as part of the FlowCollector + descriptor, as they are only useful in edge debug and support + scenarios. + type: object + type: object + dropUnusedFields: + default: true + description: dropUnusedFields allows, when set to true, to drop + fields that are known to be unused by OVS, in order to save + storage space. + type: boolean + enableKubeProbes: + default: true + description: enableKubeProbes is a flag to enable or disable Kubernetes + liveness and readiness probes + type: boolean + healthPort: + default: 8080 + description: healthPort is a collector HTTP port in the Pod that + exposes the health check API + format: int32 + maximum: 65535 + minimum: 1 + type: integer + imagePullPolicy: + default: IfNotPresent + description: imagePullPolicy is the Kubernetes pull policy for + the image defined above + enum: + - IfNotPresent + - Always + - Never + type: string + kafkaConsumerAutoscaler: + description: kafkaConsumerAutoscaler spec of a horizontal pod + autoscaler to set up for flowlogs-pipeline-transformer, which + consumes Kafka messages. This setting is ignored when Kafka + is disabled. + properties: + maxReplicas: + default: 3 + description: maxReplicas is the upper limit for the number + of pods that can be set by the autoscaler; cannot be smaller + than MinReplicas. + format: int32 + type: integer + metrics: + description: metrics used by the pod autoscaler + items: + description: MetricSpec specifies how to scale based on + a single metric (only `type` and one other matching field + should be set at once). + properties: + containerResource: + description: containerResource refers to a resource + metric (such as those specified in requests and limits) + known to Kubernetes describing a single container + in each pod of the current scale target (e.g. CPU + or memory). Such metrics are built in to Kubernetes, + and have special scaling options on top of those available + to normal per-pod metrics using the "pods" source. + This is an alpha feature and can be enabled by the + HPAContainerMetrics feature flag. + properties: + container: + description: container is the name of the container + in the pods of the scaling target + type: string + name: + description: name is the name of the resource in + question. 
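Pulling the Loki settings above together, here is a minimal sketch of `spec.loki` for a Loki Operator deployment. The host names follow the examples given in the field descriptions; the CA bundle config map name is an assumption:

```yaml
# Sketch for a Loki Operator setup; the caCert name is hypothetical.
loki:
  authToken: HOST              # or FORWARD, per the authToken description above
  url: "https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network"
  statusUrl: "https://loki-query-frontend-http.netobserv.svc:3100/"
  tenantID: network            # special tenant mode used by the Loki Operator
  tls:
    enable: true
    caCert:
      type: configmap
      name: loki-gateway-ca-bundle
      certFile: service-ca.crt
```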
+ type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - container + - name + - target + type: object + external: + description: external refers to a global metric that + is not associated with any Kubernetes object. It allows + autoscaling based on information coming from components + running outside of cluster (for example length of + queue in cloud messaging service, or QPS from loadbalancer + running outside of cluster). + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. 
+ type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + object: + description: object refers to a metric describing a + single kubernetes object (for example, hits-per-second + on an Ingress object). + properties: + describedObject: + description: describedObject specifies the descriptions + of a object,such as kind,name apiVersion + properties: + apiVersion: + description: apiVersion is the API version of + the referent + type: string + kind: + description: 'kind is the kind of the referent; + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'name is the name of the referent; + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - kind + - name + type: object + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. 
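Because `kafkaConsumerAutoscaler` mirrors the standard autoscaling/v2 `MetricSpec`, a working configuration usually needs only one or two entries. A sketch that scales the flowlogs-pipeline-transformer on average CPU utilization, with illustrative replica counts:

```yaml
kafkaConsumerAutoscaler:
  status: ENABLED
  minReplicas: 3         # illustrative
  maxReplicas: 12        # illustrative
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 75
```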
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - describedObject + - metric + - target + type: object + pods: + description: pods refers to a metric describing each + pod in the current scale target (for example, transactions-processed-per-second). The + values will be averaged together before being compared + to the target value. + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + resource: + description: resource refers to a resource metric (such + as those specified in requests and limits) known to + Kubernetes describing each pod in the current scale + target (e.g. CPU or memory). Such metrics are built + in to Kubernetes, and have special scaling options + on top of those available to normal per-pod metrics + using the "pods" source. + properties: + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - name + - target + type: object + type: + description: 'type is the type of metric source. It + should be one of "ContainerResource", "External", + "Object", "Pods" or "Resource", each mapping to a + matching field in the object. 
Note: "ContainerResource" + type is available on when the feature-gate HPAContainerMetrics + is enabled' + type: string + required: + - type + type: object + type: array + minReplicas: + description: minReplicas is the lower limit for the number + of replicas to which the autoscaler can scale down. It + defaults to 1 pod. minReplicas is allowed to be 0 if the + alpha feature gate HPAScaleToZero is enabled and at least + one Object or External metric is configured. Scaling is + active as long as at least one metric value is available. + format: int32 + type: integer + status: + default: DISABLED + description: Status describe the desired status regarding + deploying an horizontal pod autoscaler DISABLED will not + deploy an horizontal pod autoscaler ENABLED will deploy + an horizontal pod autoscaler + enum: + - DISABLED + - ENABLED + type: string + type: object + kafkaConsumerBatchSize: + default: 10485760 + description: 'kafkaConsumerBatchSize indicates to the broker the + maximum batch size, in bytes, that the consumer will accept. + Ignored when not using Kafka. Default: 10MB.' + type: integer + kafkaConsumerQueueCapacity: + default: 1000 + description: kafkaConsumerQueueCapacity defines the capacity of + the internal message queue used in the Kafka consumer client. + Ignored when not using Kafka. + type: integer + kafkaConsumerReplicas: + default: 3 + description: kafkaConsumerReplicas defines the number of replicas + (pods) to start for flowlogs-pipeline-transformer, which consumes + Kafka messages. This setting is ignored when Kafka is disabled. + format: int32 + minimum: 0 + type: integer + logLevel: + default: info + description: logLevel of the collector runtime + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + metrics: + description: Metrics define the processor configuration regarding + metrics + properties: + ignoreTags: + default: + - egress + - packets + description: 'ignoreTags is a list of tags to specify which + metrics to ignore. Each metric is associated with a list + of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions + . Available tags are: egress, ingress, flows, bytes, packets, + namespaces, nodes, workloads' + items: + type: string + type: array + server: + description: metricsServer endpoint configuration for Prometheus + scraper + properties: + port: + default: 9102 + description: the prometheus HTTP port + format: int32 + maximum: 65535 + minimum: 1 + type: integer + tls: + description: TLS configuration. + properties: + provided: + description: TLS configuration. + properties: + certFile: + description: certFile defines the path to the + certificate file name within the config map + or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map + or secret. Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret + containing certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes + same namespace as where NetObserv is deployed. + If the namespace is different, the config map + or the secret will be copied so that it can + be mounted as required. 
+ type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: + default: DISABLED + description: Select the type of TLS configuration + "DISABLED" (default) to not configure TLS for the + endpoint, "PROVIDED" to manually provide cert file + and a key file, and "AUTO" to use OpenShift auto + generated certificate using annotations + enum: + - DISABLED + - PROVIDED + - AUTO + type: string + type: object + type: object + type: object + port: + default: 2055 + description: 'port of the flow collector (host port) By conventions, + some value are not authorized port must not be below 1024 and + must not equal this values: 4789,6081,500, and 4500' + format: int32 + maximum: 65535 + minimum: 1025 + type: integer + profilePort: + description: profilePort allows setting up a Go pprof profiler + listening to this port + format: int32 + maximum: 65535 + minimum: 0 + type: integer + resources: + default: + limits: + memory: 800Mi + requests: + cpu: 100m + memory: 100Mi + description: 'resources are the compute resources required by + this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + required: + - agent + - deploymentModel + type: object + status: + description: FlowCollectorStatus defines the observed state of FlowCollector + properties: + conditions: + description: conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. 
For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. \t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: namespace where console plugin and flowlogs-pipeline + have been deployed. + type: string + required: + - conditions + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.agent.type + name: Agent + type: string + - jsonPath: .spec.agent.ebpf.sampling + name: Sampling (EBPF) + type: string + - jsonPath: .spec.deploymentModel + name: Deployment Model + type: string + - jsonPath: .status.conditions[*].reason + name: Status + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: '`FlowCollector` is the schema for the network flows collection + API, which pilots and configures the underlying deployments.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Defines the desired state of the FlowCollector resource. +

*: the mention of "unsupported", or "deprecated" for a feature + throughout this document means that this feature is not officially supported + by Red Hat. It might have been, for instance, contributed by the community + and accepted without a formal agreement for maintenance. The product + maintainers might provide some support for these features as a best + effort only.' + properties: + agent: + description: Agent configuration for flows extraction. + properties: + ebpf: + description: '`ebpf` describes the settings related to the eBPF-based + flow reporter when `spec.agent.type` is set to `EBPF`.' + properties: + cacheActiveTimeout: + default: 5s + description: '`cacheActiveTimeout` is the max period during + which the reporter aggregates flows before sending. Increasing + `cacheMaxFlows` and `cacheActiveTimeout` can decrease the + network traffic overhead and the CPU load, however you can + expect higher memory consumption and an increased latency + in the flow collection.' + pattern: ^\d+(ns|ms|s|m)?$ + type: string + cacheMaxFlows: + default: 100000 + description: '`cacheMaxFlows` is the max number of flows in + an aggregate; when reached, the reporter sends the flows. + Increasing `cacheMaxFlows` and `cacheActiveTimeout` can + decrease the network traffic overhead and the CPU load, + however you can expect higher memory consumption and an + increased latency in the flow collection.' + format: int32 + minimum: 1 + type: integer + debug: + description: '`debug` allows setting some aspects of the internal + configuration of the eBPF agent. This section is aimed exclusively + for debugging and fine-grained performance optimizations, + such as GOGC and GOMAXPROCS env vars. Users setting its + values do it at their own risk.' + properties: + env: + additionalProperties: + type: string + description: '`env` allows passing custom environment + variables to underlying components. Useful for passing + some very concrete performance-tuning options, such + as GOGC and GOMAXPROCS, that should not be publicly + exposed as part of the FlowCollector descriptor, as + they are only useful in edge debug or support scenarios.' + type: object + type: object + excludeInterfaces: + default: + - lo + description: '`excludeInterfaces` contains the interface names + that are excluded from flow tracing. An entry is enclosed + by slashes, such as `/br-/`, is matched as a regular expression. + Otherwise it is matched as a case-sensitive string.' + items: + type: string + type: array + features: + description: 'List of additional features to enable. They + are all disabled by default. Enabling additional features + may have performance impacts. Possible values are:
- + `PacketDrop`: enable the packet drops logging feature. + This feature requires mounting the kernel debug filesystem, + so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` + parameter is not set, an error is reported.
- `DNSTracking`: + enable the DNS tracking feature. This feature requires mounting + the kernel debug filesystem, hence the eBPF pod has to run + as privileged. If the `spec.agent.eBPF.privileged` parameter + is not set, an error is reported.
- `FlowRTT` [unsupported + (*)]: enable flow latency (RTT) calculations in the eBPF + agent during TCP handshakes. This feature works better with + `sampling` set to 1.
' + items: + description: Agent feature, can be one of:
- `PacketDrop`, + to track packet drops.
- `DNSTracking`, to track specific + information on DNS traffic.
- `FlowRTT`, to track + TCP latency. [Unsupported (*)].
+ enum: + - PacketDrop + - DNSTracking + - FlowRTT + type: string + type: array + imagePullPolicy: + default: IfNotPresent + description: '`imagePullPolicy` is the Kubernetes pull policy + for the image defined above' + enum: + - IfNotPresent + - Always + - Never + type: string + interfaces: + description: '`interfaces` contains the interface names from + where flows are collected. If empty, the agent fetches all + the interfaces in the system, excepting the ones listed + in ExcludeInterfaces. An entry is enclosed by slashes, such + as `/br-/`, is matched as a regular expression. Otherwise + it is matched as a case-sensitive string.' + items: + type: string + type: array + kafkaBatchSize: + default: 10485760 + description: '`kafkaBatchSize` limits the maximum size of + a request in bytes before being sent to a partition. Ignored + when not using Kafka. Default: 10MB.' + type: integer + logLevel: + default: info + description: '`logLevel` defines the log level for the NetObserv + eBPF Agent' + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + privileged: + description: 'Privileged mode for the eBPF Agent container. + In general this setting can be ignored or set to false: + in that case, the operator sets granular capabilities (BPF, + PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable + its correct operation. If for some reason these capabilities + cannot be set, such as if an old kernel version not knowing + CAP_BPF is in use, then you can turn on this mode for more + global privileges.' + type: boolean + resources: + default: + limits: + memory: 800Mi + requests: + cpu: 100m + memory: 50Mi + description: '`resources` are the compute resources required + by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. \n This field + is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + sampling: + default: 50 + description: Sampling rate of the flow reporter. 100 means + one flow on 100 is sent. 0 or 1 means all flows are sampled. + format: int32 + minimum: 0 + type: integer + type: object + ipfix: + description: '`ipfix` [deprecated (*)] - describes the settings + related to the IPFIX-based flow reporter when `spec.agent.type` + is set to `IPFIX`.' + properties: + cacheActiveTimeout: + default: 20s + description: '`cacheActiveTimeout` is the max period during + which the reporter aggregates flows before sending.' + pattern: ^\d+(ns|ms|s|m)?$ + type: string + cacheMaxFlows: + default: 400 + description: '`cacheMaxFlows` is the max number of flows in + an aggregate; when reached, the reporter sends the flows.' + format: int32 + minimum: 0 + type: integer + clusterNetworkOperator: + description: '`clusterNetworkOperator` defines the settings + related to the OpenShift Cluster Network Operator, when + available.' + properties: + namespace: + default: openshift-network-operator + description: Namespace where the config map is going + to be deployed. + type: string + type: object + forceSampleAll: + default: false + description: '`forceSampleAll` allows disabling sampling in + the IPFIX-based flow reporter. It is not recommended to + sample all the traffic with IPFIX, as it might generate + cluster instability. If you REALLY want to do that, set + this flag to true. Use at your own risk. When it is set + to true, the value of `sampling` is ignored.' + type: boolean + ovnKubernetes: + description: '`ovnKubernetes` defines the settings of the + OVN-Kubernetes CNI, when available. This configuration is + used when using OVN''s IPFIX exports, without OpenShift. + When using OpenShift, refer to the `clusterNetworkOperator` + property instead.' + properties: + containerName: + default: ovnkube-node + description: '`containerName` defines the name of the + container to configure for IPFIX.' + type: string + daemonSetName: + default: ovnkube-node + description: '`daemonSetName` defines the name of the + DaemonSet controlling the OVN-Kubernetes pods.' + type: string + namespace: + default: ovn-kubernetes + description: Namespace where OVN-Kubernetes pods are deployed. + type: string + type: object + sampling: + default: 400 + description: '`sampling` is the sampling rate on the reporter. + 100 means one flow on 100 is sent. To ensure cluster stability, + it is not possible to set a value below 2. If you really + want to sample every packet, which might impact the cluster + stability, refer to `forceSampleAll`. Alternatively, you + can use the eBPF Agent instead of IPFIX.' + format: int32 + minimum: 2 + type: integer + type: object + type: + default: EBPF + description: '`type` selects the flows tracing agent. Possible + values are:
- `EBPF` (default) to use NetObserv eBPF agent.
+ - `IPFIX` [deprecated (*)] to use the legacy IPFIX collector.
+ `EBPF` is recommended as it offers better performances and should + work regardless of the CNI installed on the cluster. `IPFIX` + works with OVN-Kubernetes CNI (other CNIs could work if they + support exporting IPFIX, but they would require manual configuration).' + enum: + - EBPF + - IPFIX + type: string + type: object + consolePlugin: + description: '`consolePlugin` defines the settings related to the + OpenShift Console plugin, when available.' + properties: + autoscaler: + description: '`autoscaler` spec of a horizontal pod autoscaler + to set up for the plugin Deployment.' + properties: + maxReplicas: + default: 3 + description: '`maxReplicas` is the upper limit for the number + of pods that can be set by the autoscaler; cannot be smaller + than MinReplicas.' + format: int32 + type: integer + metrics: + description: Metrics used by the pod autoscaler + items: + description: MetricSpec specifies how to scale based on + a single metric (only `type` and one other matching field + should be set at once). + properties: + containerResource: + description: containerResource refers to a resource + metric (such as those specified in requests and limits) + known to Kubernetes describing a single container + in each pod of the current scale target (e.g. CPU + or memory). Such metrics are built in to Kubernetes, + and have special scaling options on top of those available + to normal per-pod metrics using the "pods" source. + This is an alpha feature and can be enabled by the + HPAContainerMetrics feature flag. + properties: + container: + description: container is the name of the container + in the pods of the scaling target + type: string + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - container + - name + - target + type: object + external: + description: external refers to a global metric that + is not associated with any Kubernetes object. It allows + autoscaling based on information coming from components + running outside of cluster (for example length of + queue in cloud messaging service, or QPS from loadbalancer + running outside of cluster). 
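Tying the agent fields described above together, here is a sketch of `spec.agent` with the optional eBPF features enabled. All values are illustrative; `privileged: true` is set because, per the feature descriptions, `PacketDrop` and `DNSTracking` require mounting the kernel debug filesystem:

```yaml
agent:
  type: EBPF
  ebpf:
    sampling: 1          # FlowRTT works better with sampling set to 1
    privileged: true     # required by PacketDrop and DNSTracking
    features:
      - PacketDrop
      - DNSTracking
      - FlowRTT
```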
+ properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + object: + description: object refers to a metric describing a + single kubernetes object (for example, hits-per-second + on an Ingress object). 
+ properties: + describedObject: + description: describedObject specifies the descriptions + of a object,such as kind,name apiVersion + properties: + apiVersion: + description: apiVersion is the API version of + the referent + type: string + kind: + description: 'kind is the kind of the referent; + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'name is the name of the referent; + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - kind + - name + type: object + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - describedObject + - metric + - target + type: object + pods: + description: pods refers to a metric describing each + pod in the current scale target (for example, transactions-processed-per-second). The + values will be averaged together before being compared + to the target value. + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + resource: + description: resource refers to a resource metric (such + as those specified in requests and limits) known to + Kubernetes describing each pod in the current scale + target (e.g. CPU or memory). Such metrics are built + in to Kubernetes, and have special scaling options + on top of those available to normal per-pod metrics + using the "pods" source. + properties: + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - name + - target + type: object + type: + description: 'type is the type of metric source. It + should be one of "ContainerResource", "External", + "Object", "Pods" or "Resource", each mapping to a + matching field in the object. Note: "ContainerResource" + type is available on when the feature-gate HPAContainerMetrics + is enabled' + type: string + required: + - type + type: object + type: array + minReplicas: + description: '`minReplicas` is the lower limit for the number + of replicas to which the autoscaler can scale down. It defaults + to 1 pod. minReplicas is allowed to be 0 if the alpha feature + gate HPAScaleToZero is enabled and at least one Object or + External metric is configured. Scaling is active as long + as at least one metric value is available.' + format: int32 + type: integer + status: + default: DISABLED + description: '`status` describes the desired status regarding + deploying an horizontal pod autoscaler.
- `DISABLED` + does not deploy a horizontal pod autoscaler.<br>
- `ENABLED` + deploys a horizontal pod autoscaler.<br>
' + enum: + - DISABLED + - ENABLED + type: string + type: object + enable: + default: true + description: enable the console plugin deployment. spec.Loki.enable + must also be true + type: boolean + imagePullPolicy: + default: IfNotPresent + description: '`imagePullPolicy` is the Kubernetes pull policy + for the image defined above' + enum: + - IfNotPresent + - Always + - Never + type: string + logLevel: + default: info + description: '`logLevel` for the console plugin backend' + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + port: + default: 9001 + description: '`port` is the plugin service port. Do not use 9002, + which is reserved for metrics.' + format: int32 + maximum: 65535 + minimum: 1 + type: integer + portNaming: + default: + enable: true + description: '`portNaming` defines the configuration of the port-to-service + name translation' + properties: + enable: + default: true + description: Enable the console plugin port-to-service name + translation + type: boolean + portNames: + additionalProperties: + type: string + description: '`portNames` defines additional port names to + use in the console, for example, `portNames: {"3100": "loki"}`.' + type: object + type: object + quickFilters: + default: + - default: true + filter: + dst_namespace!: openshift-,netobserv + src_namespace!: openshift-,netobserv + name: Applications + - filter: + dst_namespace: openshift-,netobserv + src_namespace: openshift-,netobserv + name: Infrastructure + - default: true + filter: + dst_kind: Pod + src_kind: Pod + name: Pods network + - filter: + dst_kind: Service + name: Services network + description: '`quickFilters` configures quick filter presets for + the Console plugin' + items: + description: '`QuickFilter` defines preset configuration for + Console''s quick filters' + properties: + default: + description: '`default` defines whether this filter should + be active by default or not' + type: boolean + filter: + additionalProperties: + type: string + description: '`filter` is a set of keys and values to be + set when this filter is selected. Each key can relate + to a list of values using a coma-separated string, for + example, `filter: {"src_namespace": "namespace1,namespace2"}`.' + type: object + name: + description: Name of the filter, that is displayed in the + Console + type: string + required: + - filter + - name + type: object + type: array + register: + default: true + description: '`register` allows, when set to true, to automatically + register the provided console plugin with the OpenShift Console + operator. When set to false, you can still register it manually + by editing console.operator.openshift.io/cluster with the following + command: `oc patch console.operator.openshift.io cluster --type=''json'' + -p ''[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''`' + type: boolean + replicas: + default: 1 + description: '`replicas` defines the number of replicas (pods) + to start.' + format: int32 + minimum: 0 + type: integer + resources: + default: + limits: + memory: 100Mi + requests: + cpu: 100m + memory: 50Mi + description: '`resources`, in terms of compute resources, required + by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. 
\n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + deploymentModel: + default: DIRECT + description: '`deploymentModel` defines the desired type of deployment + for flow processing. Possible values are:
- `DIRECT` (default) + to make the flow processor listen directly from the agents.<br>
+ - `KAFKA` to send flows to a Kafka pipeline before consumption + by the processor.<br>
Kafka can provide better scalability, resiliency, + and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).' + enum: + - DIRECT + - KAFKA + type: string + exporters: + description: '`exporters` define additional optional exporters for + custom consumption or storage.' + items: + description: '`FlowCollectorExporter` defines an additional exporter + to send enriched flows to.' + properties: + ipfix: + description: IPFIX configuration, such as the IP address and + port to send enriched IPFIX flows to. + properties: + targetHost: + default: "" + description: Address of the IPFIX external receiver + type: string + targetPort: + description: Port for the IPFIX external receiver + type: integer + transport: + description: Transport protocol (`TCP` or `UDP`) to be used + for the IPFIX connection, defaults to `TCP`. + enum: + - TCP + - UDP + type: string + required: + - targetHost + - targetPort + type: object + kafka: + description: Kafka configuration, such as the address and topic, + to send enriched flows to. + properties: + address: + default: "" + description: Address of the Kafka server + type: string + sasl: + description: SASL authentication configuration. [Unsupported + (*)]. + properties: + clientIDReference: + description: Reference to the secret or config map containing + the client ID + properties: + file: + description: File name within the config map or + secret + type: string + name: + description: Name of the config map or secret containing + the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing the file. If omitted, the default is + to use the same namespace as where NetObserv is + deployed. If the namespace is different, the config + map or the secret is copied so that it can be + mounted as required. + type: string + type: + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + clientSecretReference: + description: Reference to the secret or config map containing + the client secret + properties: + file: + description: File name within the config map or + secret + type: string + name: + description: Name of the config map or secret containing + the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing the file. If omitted, the default is + to use the same namespace as where NetObserv is + deployed. If the namespace is different, the config + map or the secret is copied so that it can be + mounted as required. + type: string + type: + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: + default: DISABLED + description: Type of SASL authentication to use, or + `DISABLED` if SASL is not used + enum: + - DISABLED + - PLAIN + - SCRAM-SHA512 + type: string + type: object + tls: + description: TLS client configuration. When using TLS, verify + that the address matches the Kafka port used for TLS, + generally 9093. + properties: + caCert: + description: '`caCert` defines the reference of the + certificate for the Certificate Authority' + properties: + certFile: + description: '`certFile` defines the path to the + certificate file name within the config map or + secret' + type: string + certKey: + description: '`certKey` defines the path to the + certificate private key file name within the config + map or secret. 
Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it + can be mounted as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: Enable TLS + type: boolean + insecureSkipVerify: + default: false + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to + true, the `caCert` field is ignored.' + type: boolean + userCert: + description: '`userCert` defines the user certificate + reference and is used for mTLS (you can ignore it + when using one-way TLS)' + properties: + certFile: + description: '`certFile` defines the path to the + certificate file name within the config map or + secret' + type: string + certKey: + description: '`certKey` defines the path to the + certificate private key file name within the config + map or secret. Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it + can be mounted as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + type: object + topic: + default: "" + description: Kafka topic to use. It must exist, NetObserv + does not create it. + type: string + required: + - address + - topic + type: object + type: + description: '`type` selects the type of exporters. The available + options are `KAFKA` and `IPFIX`.' + enum: + - KAFKA + - IPFIX + type: string + required: + - type + type: object + type: array + kafka: + description: Kafka configuration, allowing to use Kafka as a broker + as part of the flow collection pipeline. Available when the `spec.deploymentModel` + is `KAFKA`. + properties: + address: + default: "" + description: Address of the Kafka server + type: string + sasl: + description: SASL authentication configuration. [Unsupported (*)]. + properties: + clientIDReference: + description: Reference to the secret or config map containing + the client ID + properties: + file: + description: File name within the config map or secret + type: string + name: + description: Name of the config map or secret containing + the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + the file. If omitted, the default is to use the same + namespace as where NetObserv is deployed. If the namespace + is different, the config map or the secret is copied + so that it can be mounted as required. 
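+ # --- Illustrative sketch (comment only, not part of the generated schema):
+ # an exporter entry with SASL credentials read from a secret. The secret name
+ # `kafka-creds` and the file names are hypothetical; the fields follow the
+ # file-reference schema above.
+ # exporters:
+ # - type: KAFKA
+ #   kafka:
+ #     address: "kafka-cluster-kafka-bootstrap.netobserv:9092"
+ #     topic: netobserv-flows-export
+ #     sasl:
+ #       type: PLAIN
+ #       clientIDReference:
+ #         type: secret
+ #         name: kafka-creds
+ #         file: client-id
+ #       clientSecretReference:
+ #         type: secret
+ #         name: kafka-creds
+ #         file: client-secret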
+ type: string + type: + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + clientSecretReference: + description: Reference to the secret or config map containing + the client secret + properties: + file: + description: File name within the config map or secret + type: string + name: + description: Name of the config map or secret containing + the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + the file. If omitted, the default is to use the same + namespace as where NetObserv is deployed. If the namespace + is different, the config map or the secret is copied + so that it can be mounted as required. + type: string + type: + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: + default: DISABLED + description: Type of SASL authentication to use, or `DISABLED` + if SASL is not used + enum: + - DISABLED + - PLAIN + - SCRAM-SHA512 + type: string + type: object + tls: + description: TLS client configuration. When using TLS, verify + that the address matches the Kafka port used for TLS, generally + 9093. + properties: + caCert: + description: '`caCert` defines the reference of the certificate + for the Certificate Authority' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + certificates. If omitted, the default is to use the + same namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + is copied so that it can be mounted as required. + type: string + type: + description: 'Type for the certificate reference: `configmap` + or `secret`' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: Enable TLS + type: boolean + insecureSkipVerify: + default: false + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to true, + the `caCert` field is ignored.' + type: boolean + userCert: + description: '`userCert` defines the user certificate reference + and is used for mTLS (you can ignore it when using one-way + TLS)' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + certificates. If omitted, the default is to use the + same namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + is copied so that it can be mounted as required. 
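+ # --- Illustrative sketch (comment only, not part of the generated schema):
+ # Kafka as the pipeline broker with mTLS, per the `spec.kafka` fields above.
+ # Secret names follow common Strimzi conventions and are assumptions; 9093 is
+ # the TLS port mentioned in the `tls` description.
+ # deploymentModel: KAFKA
+ # kafka:
+ #   address: "kafka-cluster-kafka-bootstrap.netobserv:9093"
+ #   topic: network-flows
+ #   tls:
+ #     enable: true
+ #     caCert:
+ #       type: secret
+ #       name: kafka-cluster-cluster-ca-cert
+ #       certFile: ca.crt
+ #     userCert:
+ #       type: secret
+ #       name: flp-kafka
+ #       certFile: user.crt
+ #       certKey: user.key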
+ type: string + type: + description: 'Type for the certificate reference: `configmap` + or `secret`' + enum: + - configmap + - secret + type: string + type: object + type: object + topic: + default: "" + description: Kafka topic to use. It must exist, NetObserv does + not create it. + type: string + required: + - address + - topic + type: object + loki: + description: Loki, the flow store, client settings. + properties: + authToken: + default: DISABLED + description: '`authToken` describes the way to get a token to + authenticate to Loki.<br>
- `DISABLED` does not send any token + with the request.<br>
- `FORWARD` forwards the user token for + authorization.<br>
- `HOST` [deprecated (*)] - uses the local + pod service account to authenticate to Loki.<br>
When using + the Loki Operator, this must be set to `FORWARD`.' + enum: + - DISABLED + - HOST + - FORWARD + type: string + batchSize: + default: 102400 + description: '`batchSize` is the maximum batch size (in bytes) + of logs to accumulate before sending.' + format: int64 + minimum: 1 + type: integer + batchWait: + default: 1s + description: '`batchWait` is the maximum time to wait before sending + a batch.' + type: string + enable: + default: true + description: enable storing flows to Loki. It is required for + the OpenShift Console plugin installation. + type: boolean + maxBackoff: + default: 5s + description: '`maxBackoff` is the maximum backoff time for client + connection between retries.' + type: string + maxRetries: + default: 2 + description: '`maxRetries` is the maximum number of retries for + client connections.' + format: int32 + minimum: 0 + type: integer + minBackoff: + default: 1s + description: '`minBackoff` is the initial backoff time for client + connection between retries.' + type: string + querierUrl: + description: '`querierURL` specifies the address of the Loki querier + service, in case it is different from the Loki ingester URL. + If empty, the URL value is used (assuming that the Loki ingester + and querier are in the same server). When using the Loki Operator, + do not set it, since ingestion and queries use the Loki gateway.' + type: string + staticLabels: + additionalProperties: + type: string + default: + app: netobserv-flowcollector + description: '`staticLabels` is a map of common labels to set + on each flow.' + type: object + statusTls: + description: TLS client configuration for Loki status URL. + properties: + caCert: + description: '`caCert` defines the reference of the certificate + for the Certificate Authority' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + certificates. If omitted, the default is to use the + same namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + is copied so that it can be mounted as required. + type: string + type: + description: 'Type for the certificate reference: `configmap` + or `secret`' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: Enable TLS + type: boolean + insecureSkipVerify: + default: false + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to true, + the `caCert` field is ignored.' + type: boolean + userCert: + description: '`userCert` defines the user certificate reference + and is used for mTLS (you can ignore it when using one-way + TLS)' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' 
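+ # --- Illustrative sketch (comment only, not part of the generated schema):
+ # Loki client settings for a Loki-Operator deployment, combining the values
+ # recommended in the descriptions above (`FORWARD` token, `network` tenant,
+ # gateway URL). Service and config map names are assumptions.
+ # loki:
+ #   enable: true
+ #   url: "https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network"
+ #   authToken: FORWARD
+ #   tenantID: network
+ #   tls:
+ #     enable: true
+ #     caCert:
+ #       type: configmap
+ #       name: loki-gateway-ca-bundle
+ #       certFile: service-ca.crt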
+ type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + certificates. If omitted, the default is to use the + same namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + is copied so that it can be mounted as required. + type: string + type: + description: 'Type for the certificate reference: `configmap` + or `secret`' + enum: + - configmap + - secret + type: string + type: object + type: object + statusUrl: + description: '`statusURL` specifies the address of the Loki `/ready`, + `/metrics` and `/config` endpoints, in case it is different + from the Loki querier URL. If empty, the `querierURL` value + is used. This is useful to show error messages and some context + in the frontend. When using the Loki Operator, set it to the + Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. + `statusTLS` configuration is used when `statusUrl` is set.' + type: string + tenantID: + default: netobserv + description: '`tenantID` is the Loki `X-Scope-OrgID` that identifies + the tenant for each request. When using the Loki Operator, set + it to `network`, which corresponds to a special tenant mode.' + type: string + timeout: + default: 10s + description: '`timeout` is the maximum time connection / request + limit. A timeout of zero means no timeout.' + type: string + tls: + description: TLS client configuration for Loki URL. + properties: + caCert: + description: '`caCert` defines the reference of the certificate + for the Certificate Authority' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + certificates. If omitted, the default is to use the + same namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + is copied so that it can be mounted as required. + type: string + type: + description: 'Type for the certificate reference: `configmap` + or `secret`' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: Enable TLS + type: boolean + insecureSkipVerify: + default: false + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to true, + the `caCert` field is ignored.' + type: boolean + userCert: + description: '`userCert` defines the user certificate reference + and is used for mTLS (you can ignore it when using one-way + TLS)' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' 
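+ # --- Illustrative sketch (comment only, not part of the generated schema):
+ # exposing Loki status to the frontend via the query frontend, reusing the
+ # example URL from the `statusUrl` description above.
+ # loki:
+ #   statusUrl: "https://loki-query-frontend-http.netobserv.svc:3100/"
+ #   statusTls:
+ #     enable: true
+ #     insecureSkipVerify: false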
+ type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + certificates. If omitted, the default is to use the + same namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + is copied so that it can be mounted as required. + type: string + type: + description: 'Type for the certificate reference: `configmap` + or `secret`' + enum: + - configmap + - secret + type: string + type: object + type: object + url: + default: http://loki:3100/ + description: '`url` is the address of an existing Loki service + to push the flows to. When using the Loki Operator, set it to + the Loki gateway service with the `network` tenant set in path, + for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.' + type: string + type: object + namespace: + default: netobserv + description: Namespace where NetObserv pods are deployed. + type: string + processor: + description: '`processor` defines the settings of the component that + receives the flows from the agent, enriches them, generates metrics, + and forwards them to the Loki persistence layer and/or any available + exporter.' + properties: + clusterName: + default: "" + description: '`clusterName` is the name of the cluster to appear + in the flows data. This is useful in a multi-cluster context. + When using OpenShift, leave empty to make it automatically determined.' + type: string + conversationEndTimeout: + default: 10s + description: '`conversationEndTimeout` is the time to wait after + a network flow is received, to consider the conversation ended. + This delay is ignored when a FIN packet is collected for TCP + flows (see `conversationTerminatingTimeout` instead).' + type: string + conversationHeartbeatInterval: + default: 30s + description: '`conversationHeartbeatInterval` is the time to wait + between "tick" events of a conversation' + type: string + conversationTerminatingTimeout: + default: 5s + description: '`conversationTerminatingTimeout` is the time to + wait from detected FIN flag to end a conversation. Only relevant + for TCP flows.' + type: string + debug: + description: '`debug` allows setting some aspects of the internal + configuration of the flow processor. This section is aimed exclusively + for debugging and fine-grained performance optimizations, such + as GOGC and GOMAXPROCS env vars. Users setting its values do + it at their own risk.' + properties: + env: + additionalProperties: + type: string + description: '`env` allows passing custom environment variables + to underlying components. Useful for passing some very concrete + performance-tuning options, such as GOGC and GOMAXPROCS, + that should not be publicly exposed as part of the FlowCollector + descriptor, as they are only useful in edge debug or support + scenarios.' + type: object + type: object + dropUnusedFields: + default: true + description: '`dropUnusedFields` allows, when set to true, to + drop fields that are known to be unused by OVS, to save storage + space.' 
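+ # --- Illustrative sketch (comment only, not part of the generated schema):
+ # the conversation-tracking timers described above; they take effect with
+ # `logTypes: CONVERSATIONS` (defined further down in this schema).
+ # processor:
+ #   logTypes: CONVERSATIONS
+ #   conversationHeartbeatInterval: 30s
+ #   conversationEndTimeout: 10s
+ #   conversationTerminatingTimeout: 5s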
+ type: boolean + enableKubeProbes: + default: true + description: '`enableKubeProbes` is a flag to enable or disable + Kubernetes liveness and readiness probes' + type: boolean + healthPort: + default: 8080 + description: '`healthPort` is a collector HTTP port in the Pod + that exposes the health check API' + format: int32 + maximum: 65535 + minimum: 1 + type: integer + imagePullPolicy: + default: IfNotPresent + description: '`imagePullPolicy` is the Kubernetes pull policy + for the image defined above' + enum: + - IfNotPresent + - Always + - Never + type: string + kafkaConsumerAutoscaler: + description: '`kafkaConsumerAutoscaler` is the spec of a horizontal + pod autoscaler to set up for `flowlogs-pipeline-transformer`, + which consumes Kafka messages. This setting is ignored when + Kafka is disabled.' + properties: + maxReplicas: + default: 3 + description: '`maxReplicas` is the upper limit for the number + of pods that can be set by the autoscaler; cannot be smaller + than MinReplicas.' + format: int32 + type: integer + metrics: + description: Metrics used by the pod autoscaler + items: + description: MetricSpec specifies how to scale based on + a single metric (only `type` and one other matching field + should be set at once). + properties: + containerResource: + description: containerResource refers to a resource + metric (such as those specified in requests and limits) + known to Kubernetes describing a single container + in each pod of the current scale target (e.g. CPU + or memory). Such metrics are built in to Kubernetes, + and have special scaling options on top of those available + to normal per-pod metrics using the "pods" source. + This is an alpha feature and can be enabled by the + HPAContainerMetrics feature flag. + properties: + container: + description: container is the name of the container + in the pods of the scaling target + type: string + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - container + - name + - target + type: object + external: + description: external refers to a global metric that + is not associated with any Kubernetes object. 
It allows + autoscaling based on information coming from components + running outside of cluster (for example length of + queue in cloud messaging service, or QPS from loadbalancer + running outside of cluster). + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + object: + description: object refers to a metric describing a + single kubernetes object (for example, hits-per-second + on an Ingress object). 
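+ # --- Illustrative sketch (comment only, not part of the generated schema):
+ # an `external` metric for `kafkaConsumerAutoscaler`, such as consumer-group
+ # lag published by an external metrics adapter. The metric name and selector
+ # are hypothetical.
+ # kafkaConsumerAutoscaler:
+ #   status: ENABLED
+ #   maxReplicas: 6
+ #   metrics:
+ #   - type: External
+ #     external:
+ #       metric:
+ #         name: kafka_consumergroup_lag
+ #         selector:
+ #           matchLabels:
+ #             topic: network-flows
+ #       target:
+ #         type: AverageValue
+ #         averageValue: "100"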
+ properties: + describedObject: + description: describedObject specifies the descriptions + of a object,such as kind,name apiVersion + properties: + apiVersion: + description: apiVersion is the API version of + the referent + type: string + kind: + description: 'kind is the kind of the referent; + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'name is the name of the referent; + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - kind + - name + type: object + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - describedObject + - metric + - target + type: object + pods: + description: pods refers to a metric describing each + pod in the current scale target (for example, transactions-processed-per-second). The + values will be averaged together before being compared + to the target value. + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + resource: + description: resource refers to a resource metric (such + as those specified in requests and limits) known to + Kubernetes describing each pod in the current scale + target (e.g. CPU or memory). Such metrics are built + in to Kubernetes, and have special scaling options + on top of those available to normal per-pod metrics + using the "pods" source. + properties: + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - name + - target + type: object + type: + description: 'type is the type of metric source. It + should be one of "ContainerResource", "External", + "Object", "Pods" or "Resource", each mapping to a + matching field in the object. Note: "ContainerResource" + type is available on when the feature-gate HPAContainerMetrics + is enabled' + type: string + required: + - type + type: object + type: array + minReplicas: + description: '`minReplicas` is the lower limit for the number + of replicas to which the autoscaler can scale down. It defaults + to 1 pod. minReplicas is allowed to be 0 if the alpha feature + gate HPAScaleToZero is enabled and at least one Object or + External metric is configured. Scaling is active as long + as at least one metric value is available.' + format: int32 + type: integer + status: + default: DISABLED + description: '`status` describes the desired status regarding + deploying an horizontal pod autoscaler.
- `DISABLED` + does not deploy a horizontal pod autoscaler.<br>
- `ENABLED` + deploys a horizontal pod autoscaler.<br>
' + enum: + - DISABLED + - ENABLED + type: string + type: object + kafkaConsumerBatchSize: + default: 10485760 + description: '`kafkaConsumerBatchSize` indicates to the broker + the maximum batch size, in bytes, that the consumer accepts. + Ignored when not using Kafka. Default: 10MB.' + type: integer + kafkaConsumerQueueCapacity: + default: 1000 + description: '`kafkaConsumerQueueCapacity` defines the capacity + of the internal message queue used in the Kafka consumer client. + Ignored when not using Kafka.' + type: integer + kafkaConsumerReplicas: + default: 3 + description: '`kafkaConsumerReplicas` defines the number of replicas + (pods) to start for `flowlogs-pipeline-transformer`, which consumes + Kafka messages. This setting is ignored when Kafka is disabled.' + format: int32 + minimum: 0 + type: integer + logLevel: + default: info + description: '`logLevel` of the processor runtime' + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + logTypes: + default: FLOWS + description: '`logTypes` defines the desired record types to generate. + Possible values are:
- `FLOWS` (default) to export regular + network flows<br>
- `CONVERSATIONS` to generate events for started + conversations, ended conversations as well as periodic "tick" + updates<br>
- `ENDED_CONVERSATIONS` to generate only ended conversations + events<br>
- `ALL` to generate both network flows and all conversations + events<br>
' + enum: + - FLOWS + - CONVERSATIONS + - ENDED_CONVERSATIONS + - ALL + type: string + metrics: + description: '`metrics` define the processor configuration regarding + metrics' + properties: + disableAlerts: + description: '`disableAlerts` is a list of alerts that should + be disabled. Possible values are:<br>
`NetObservNoFlows`, + which is triggered when no flows are being observed for + a certain period.<br>
`NetObservLokiError`, which is triggered + when flows are being dropped due to Loki errors.<br>
' + items: + description: Name of a processor alert. Possible values + are:<br>
- `NetObservNoFlows`, which is triggered when + no flows are being observed for a certain period.<br>
- `NetObservLokiError`, which is triggered when flows + are being dropped due to Loki errors.<br>
+ enum: + - NetObservNoFlows + - NetObservLokiError + type: string + type: array + ignoreTags: + default: + - egress + - packets + - nodes-flows + - namespaces-flows + - workloads-flows + - namespaces + description: '`ignoreTags` is a list of tags to specify which + metrics to ignore. Each metric is associated with a list + of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions + . Available tags are: `egress`, `ingress`, `flows`, `bytes`, + `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, + `namespaces-flows`, `workloads-flows`. Namespace-based metrics + are covered by both `workloads` and `namespaces` tags, hence + it is recommended to always ignore one of them (`workloads` + offering a finer granularity).' + items: + type: string + type: array + server: + description: Metrics server endpoint configuration for Prometheus + scraper + properties: + port: + default: 9102 + description: The prometheus HTTP port + format: int32 + maximum: 65535 + minimum: 1 + type: integer + tls: + description: TLS configuration. + properties: + insecureSkipVerify: + default: false + description: insecureSkipVerify allows skipping client-side + verification of the provided certificate. If set + to true, the `providedCaFile` field is ignored. + type: boolean + provided: + description: TLS configuration when `type` is set + to `PROVIDED`. + properties: + certFile: + description: '`certFile` defines the path to the + certificate file name within the config map + or secret' + type: string + certKey: + description: '`certKey` defines the path to the + certificate private key file name within the + config map or secret. Omit when the key is not + necessary.' + type: string + name: + description: Name of the config map or secret + containing certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, + the config map or the secret is copied so that + it can be mounted as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + providedCaFile: + description: Reference to the CA file when `type` + is set to `PROVIDED`. + properties: + file: + description: File name within the config map or + secret + type: string + name: + description: Name of the config map or secret + containing the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing the file. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, + the config map or the secret is copied so that + it can be mounted as required. + type: string + type: + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: + default: DISABLED + description: Select the type of TLS configuration:
+ - `DISABLED` (default) to not configure TLS for + the endpoint. - `PROVIDED` to manually provide cert + file and a key file. - `AUTO` to use OpenShift auto + generated certificate using annotations. + enum: + - DISABLED + - PROVIDED + - AUTO + type: string + type: object + type: object + type: object + port: + default: 2055 + description: Port of the flow collector (host port). By convention, + some values are forbidden. It must be greater than 1024 and + different from 4500, 4789 and 6081. + format: int32 + maximum: 65535 + minimum: 1025 + type: integer + profilePort: + description: '`profilePort` allows setting up a Go pprof profiler + listening to this port' + format: int32 + maximum: 65535 + minimum: 0 + type: integer + resources: + default: + limits: + memory: 800Mi + requests: + cpu: 100m + memory: 100Mi + description: '`resources` are the compute resources required by + this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + type: object + status: + description: '`FlowCollectorStatus` defines the observed state of FlowCollector' + properties: + conditions: + description: '`conditions` represent the latest available observations + of an object''s state' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. 
\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: Namespace where console plugin and flowlogs-pipeline + have been deployed. + type: string + required: + - conditions + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.agent.type + name: Agent + type: string + - jsonPath: .spec.agent.ebpf.sampling + name: Sampling (EBPF) + type: string + - jsonPath: .spec.deploymentModel + name: Deployment Model + type: string + - jsonPath: .status.conditions[*].reason + name: Status + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: '`FlowCollector` is the schema for the network flows collection + API, which pilots and configures the underlying deployments.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Defines the desired state of the FlowCollector resource. +
<br>
*: the mention of "unsupported", or "deprecated" for a feature + throughout this document means that this feature is not officially supported + by Red Hat. It might have been, for instance, contributed by the community + and accepted without a formal agreement for maintenance. The product + maintainers might provide some support for these features as a best + effort only.' + properties: + agent: + description: Agent configuration for flows extraction. + properties: + ebpf: + description: '`ebpf` describes the settings related to the eBPF-based + flow reporter when `spec.agent.type` is set to `EBPF`.' + properties: + cacheActiveTimeout: + default: 5s + description: '`cacheActiveTimeout` is the max period during + which the reporter aggregates flows before sending. Increasing + `cacheMaxFlows` and `cacheActiveTimeout` can decrease the + network traffic overhead and the CPU load, however you can + expect higher memory consumption and an increased latency + in the flow collection.' + pattern: ^\d+(ns|ms|s|m)?$ + type: string + cacheMaxFlows: + default: 100000 + description: '`cacheMaxFlows` is the max number of flows in + an aggregate; when reached, the reporter sends the flows. + Increasing `cacheMaxFlows` and `cacheActiveTimeout` can + decrease the network traffic overhead and the CPU load, + however you can expect higher memory consumption and an + increased latency in the flow collection.' + format: int32 + minimum: 1 + type: integer + debug: + description: '`debug` allows setting some aspects of the internal + configuration of the eBPF agent. This section is aimed exclusively + for debugging and fine-grained performance optimizations, + such as GOGC and GOMAXPROCS env vars. Users setting its + values do it at their own risk.' + properties: + env: + additionalProperties: + type: string + description: '`env` allows passing custom environment + variables to underlying components. Useful for passing + some very concrete performance-tuning options, such + as GOGC and GOMAXPROCS, that should not be publicly + exposed as part of the FlowCollector descriptor, as + they are only useful in edge debug or support scenarios.' + type: object + type: object + excludeInterfaces: + default: + - lo + description: '`excludeInterfaces` contains the interface names + that are excluded from flow tracing. An entry is enclosed + by slashes, such as `/br-/`, is matched as a regular expression. + Otherwise it is matched as a case-sensitive string.' + items: + type: string + type: array + features: + description: 'List of additional features to enable. They + are all disabled by default. Enabling additional features + may have performance impacts. Possible values are:
-
+                      `PacketDrop`: enable the packet drops logging feature.
+                      This feature requires mounting the kernel debug filesystem,
+                      so the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged`
+                      parameter is not set, an error is reported.
- `DNSTracking`:
+                      enable the DNS tracking feature. This feature requires mounting
+                      the kernel debug filesystem, so the eBPF pod has to run
+                      as privileged. If the `spec.agent.ebpf.privileged` parameter
+                      is not set, an error is reported.
- `FlowRTT` [unsupported
+                      (*)]: enable flow latency (RTT) calculations in the eBPF
+                      agent during TCP handshakes. This feature works better with
+                      `sampling` set to 1.
'
+                    items:
+                      description: Agent feature, which can be one of:
- `PacketDrop`, + to track packet drops.
- `DNSTracking`, to track specific + information on DNS traffic.
- `FlowRTT`, to track + TCP latency. [Unsupported (*)].
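+                      # A minimal sketch of enabling these features on a FlowCollector
+                      # resource (the resource name is illustrative; `PacketDrop` and
+                      # `DNSTracking` also need `privileged: true`, as described above):
+                      #
+                      #   apiVersion: flows.netobserv.io/v1beta2
+                      #   kind: FlowCollector
+                      #   metadata:
+                      #     name: cluster
+                      #   spec:
+                      #     agent:
+                      #       type: EBPF
+                      #       ebpf:
+                      #         privileged: true
+                      #         sampling: 1
+                      #         features:
+                      #         - PacketDrop
+                      #         - DNSTracking
+                      #         - FlowRTT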
+ enum: + - PacketDrop + - DNSTracking + - FlowRTT + type: string + type: array + imagePullPolicy: + default: IfNotPresent + description: '`imagePullPolicy` is the Kubernetes pull policy + for the image defined above' + enum: + - IfNotPresent + - Always + - Never + type: string + interfaces: + description: '`interfaces` contains the interface names from + where flows are collected. If empty, the agent fetches all + the interfaces in the system, excepting the ones listed + in ExcludeInterfaces. An entry is enclosed by slashes, such + as `/br-/`, is matched as a regular expression. Otherwise + it is matched as a case-sensitive string.' + items: + type: string + type: array + kafkaBatchSize: + default: 10485760 + description: '`kafkaBatchSize` limits the maximum size of + a request in bytes before being sent to a partition. Ignored + when not using Kafka. Default: 10MB.' + type: integer + logLevel: + default: info + description: '`logLevel` defines the log level for the NetObserv + eBPF Agent' + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + privileged: + description: 'Privileged mode for the eBPF Agent container. + In general this setting can be ignored or set to false: + in that case, the operator sets granular capabilities (BPF, + PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable + its correct operation. If for some reason these capabilities + cannot be set, such as if an old kernel version not knowing + CAP_BPF is in use, then you can turn on this mode for more + global privileges.' + type: boolean + resources: + default: + limits: + memory: 800Mi + requests: + cpu: 100m + memory: 50Mi + description: '`resources` are the compute resources required + by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. \n This field + is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + sampling: + default: 50 + description: Sampling rate of the flow reporter. 100 means + one flow on 100 is sent. 0 or 1 means all flows are sampled. + format: int32 + minimum: 0 + type: integer + type: object + ipfix: + description: '`ipfix` [deprecated (*)] - describes the settings + related to the IPFIX-based flow reporter when `spec.agent.type` + is set to `IPFIX`.' + properties: + cacheActiveTimeout: + default: 20s + description: '`cacheActiveTimeout` is the max period during + which the reporter aggregates flows before sending.' + pattern: ^\d+(ns|ms|s|m)?$ + type: string + cacheMaxFlows: + default: 400 + description: '`cacheMaxFlows` is the max number of flows in + an aggregate; when reached, the reporter sends the flows.' + format: int32 + minimum: 0 + type: integer + clusterNetworkOperator: + description: '`clusterNetworkOperator` defines the settings + related to the OpenShift Cluster Network Operator, when + available.' + properties: + namespace: + default: openshift-network-operator + description: Namespace where the config map is going + to be deployed. + type: string + type: object + forceSampleAll: + default: false + description: '`forceSampleAll` allows disabling sampling in + the IPFIX-based flow reporter. It is not recommended to + sample all the traffic with IPFIX, as it might generate + cluster instability. If you REALLY want to do that, set + this flag to true. Use at your own risk. When it is set + to true, the value of `sampling` is ignored.' + type: boolean + ovnKubernetes: + description: '`ovnKubernetes` defines the settings of the + OVN-Kubernetes CNI, when available. This configuration is + used when using OVN''s IPFIX exports, without OpenShift. + When using OpenShift, refer to the `clusterNetworkOperator` + property instead.' + properties: + containerName: + default: ovnkube-node + description: '`containerName` defines the name of the + container to configure for IPFIX.' + type: string + daemonSetName: + default: ovnkube-node + description: '`daemonSetName` defines the name of the + DaemonSet controlling the OVN-Kubernetes pods.' + type: string + namespace: + default: ovn-kubernetes + description: Namespace where OVN-Kubernetes pods are deployed. + type: string + type: object + sampling: + default: 400 + description: '`sampling` is the sampling rate on the reporter. + 100 means one flow on 100 is sent. To ensure cluster stability, + it is not possible to set a value below 2. If you really + want to sample every packet, which might impact the cluster + stability, refer to `forceSampleAll`. Alternatively, you + can use the eBPF Agent instead of IPFIX.' + format: int32 + minimum: 2 + type: integer + type: object + type: + default: EBPF + description: '`type` selects the flows tracing agent. Possible + values are:
- `EBPF` (default) to use the NetObserv eBPF agent.
+ - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
+ `EBPF` is recommended as it offers better performances and should + work regardless of the CNI installed on the cluster. `IPFIX` + works with OVN-Kubernetes CNI (other CNIs could work if they + support exporting IPFIX, but they would require manual configuration).' + enum: + - EBPF + - IPFIX + type: string + type: object + consolePlugin: + description: '`consolePlugin` defines the settings related to the + OpenShift Console plugin, when available.' + properties: + autoscaler: + description: '`autoscaler` spec of a horizontal pod autoscaler + to set up for the plugin Deployment.' + properties: + maxReplicas: + default: 3 + description: '`maxReplicas` is the upper limit for the number + of pods that can be set by the autoscaler; cannot be smaller + than MinReplicas.' + format: int32 + type: integer + metrics: + description: Metrics used by the pod autoscaler + items: + description: MetricSpec specifies how to scale based on + a single metric (only `type` and one other matching field + should be set at once). + properties: + containerResource: + description: containerResource refers to a resource + metric (such as those specified in requests and limits) + known to Kubernetes describing a single container + in each pod of the current scale target (e.g. CPU + or memory). Such metrics are built in to Kubernetes, + and have special scaling options on top of those available + to normal per-pod metrics using the "pods" source. + This is an alpha feature and can be enabled by the + HPAContainerMetrics feature flag. + properties: + container: + description: container is the name of the container + in the pods of the scaling target + type: string + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - container + - name + - target + type: object + external: + description: external refers to a global metric that + is not associated with any Kubernetes object. It allows + autoscaling based on information coming from components + running outside of cluster (for example length of + queue in cloud messaging service, or QPS from loadbalancer + running outside of cluster). 
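+                          # A minimal sketch of `spec.consolePlugin.autoscaler` using the
+                          # common `Resource` source; the `containerResource`, `external`,
+                          # `object` and `pods` sources follow the upstream autoscaling/v2
+                          # shapes documented below:
+                          #
+                          #   consolePlugin:
+                          #     autoscaler:
+                          #       status: ENABLED
+                          #       minReplicas: 1
+                          #       maxReplicas: 3
+                          #       metrics:
+                          #       - type: Resource
+                          #         resource:
+                          #           name: cpu
+                          #           target:
+                          #             type: Utilization
+                          #             averageUtilization: 50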
+ properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + object: + description: object refers to a metric describing a + single kubernetes object (for example, hits-per-second + on an Ingress object). 
+ properties: + describedObject: + description: describedObject specifies the descriptions + of a object,such as kind,name apiVersion + properties: + apiVersion: + description: apiVersion is the API version of + the referent + type: string + kind: + description: 'kind is the kind of the referent; + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'name is the name of the referent; + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - kind + - name + type: object + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - describedObject + - metric + - target + type: object + pods: + description: pods refers to a metric describing each + pod in the current scale target (for example, transactions-processed-per-second). The + values will be averaged together before being compared + to the target value. + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + resource: + description: resource refers to a resource metric (such + as those specified in requests and limits) known to + Kubernetes describing each pod in the current scale + target (e.g. CPU or memory). Such metrics are built + in to Kubernetes, and have special scaling options + on top of those available to normal per-pod metrics + using the "pods" source. + properties: + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - name + - target + type: object + type: + description: 'type is the type of metric source. It + should be one of "ContainerResource", "External", + "Object", "Pods" or "Resource", each mapping to a + matching field in the object. Note: "ContainerResource" + type is available on when the feature-gate HPAContainerMetrics + is enabled' + type: string + required: + - type + type: object + type: array + minReplicas: + description: '`minReplicas` is the lower limit for the number + of replicas to which the autoscaler can scale down. It defaults + to 1 pod. minReplicas is allowed to be 0 if the alpha feature + gate HPAScaleToZero is enabled and at least one Object or + External metric is configured. Scaling is active as long + as at least one metric value is available.' + format: int32 + type: integer + status: + default: DISABLED + description: '`status` describes the desired status regarding + deploying an horizontal pod autoscaler.
- `DISABLED`
+                does not deploy a horizontal pod autoscaler.
- `ENABLED`
+                deploys a horizontal pod autoscaler.
' + enum: + - DISABLED + - ENABLED + type: string + type: object + enable: + default: true + description: enable the console plugin deployment. spec.Loki.enable + must also be true + type: boolean + imagePullPolicy: + default: IfNotPresent + description: '`imagePullPolicy` is the Kubernetes pull policy + for the image defined above' + enum: + - IfNotPresent + - Always + - Never + type: string + logLevel: + default: info + description: '`logLevel` for the console plugin backend' + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + port: + default: 9001 + description: '`port` is the plugin service port. Do not use 9002, + which is reserved for metrics.' + format: int32 + maximum: 65535 + minimum: 1 + type: integer + portNaming: + default: + enable: true + description: '`portNaming` defines the configuration of the port-to-service + name translation' + properties: + enable: + default: true + description: Enable the console plugin port-to-service name + translation + type: boolean + portNames: + additionalProperties: + type: string + description: '`portNames` defines additional port names to + use in the console, for example, `portNames: {"3100": "loki"}`.' + type: object + type: object + quickFilters: + default: + - default: true + filter: + dst_namespace!: openshift-,netobserv + src_namespace!: openshift-,netobserv + name: Applications + - filter: + dst_namespace: openshift-,netobserv + src_namespace: openshift-,netobserv + name: Infrastructure + - default: true + filter: + dst_kind: Pod + src_kind: Pod + name: Pods network + - filter: + dst_kind: Service + name: Services network + description: '`quickFilters` configures quick filter presets for + the Console plugin' + items: + description: '`QuickFilter` defines preset configuration for + Console''s quick filters' + properties: + default: + description: '`default` defines whether this filter should + be active by default or not' + type: boolean + filter: + additionalProperties: + type: string + description: '`filter` is a set of keys and values to be + set when this filter is selected. Each key can relate + to a list of values using a coma-separated string, for + example, `filter: {"src_namespace": "namespace1,namespace2"}`.' + type: object + name: + description: Name of the filter, that is displayed in the + Console + type: string + required: + - filter + - name + type: object + type: array + register: + default: true + description: '`register` allows, when set to true, to automatically + register the provided console plugin with the OpenShift Console + operator. When set to false, you can still register it manually + by editing console.operator.openshift.io/cluster with the following + command: `oc patch console.operator.openshift.io cluster --type=''json'' + -p ''[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''`' + type: boolean + replicas: + default: 1 + description: '`replicas` defines the number of replicas (pods) + to start.' + format: int32 + minimum: 0 + type: integer + resources: + default: + limits: + memory: 100Mi + requests: + cpu: 100m + memory: 50Mi + description: '`resources`, in terms of compute resources, required + by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. 
\n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + deploymentModel: + default: DIRECT + description: '`deploymentModel` defines the desired type of deployment + for flow processing. Possible values are:
- `DIRECT` (default)
+            to make the flow processor listen directly from the agents.
+            - `KAFKA` to send flows to a Kafka pipeline before consumption
+              by the processor.
Kafka can provide better scalability, resiliency, + and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).' + enum: + - DIRECT + - KAFKA + type: string + exporters: + description: '`exporters` define additional optional exporters for + custom consumption or storage.' + items: + description: '`FlowCollectorExporter` defines an additional exporter + to send enriched flows to.' + properties: + ipfix: + description: IPFIX configuration, such as the IP address and + port to send enriched IPFIX flows to. [Unsupported (*)]. + properties: + targetHost: + default: "" + description: Address of the IPFIX external receiver + type: string + targetPort: + description: Port for the IPFIX external receiver + type: integer + transport: + description: Transport protocol (`TCP` or `UDP`) to be used + for the IPFIX connection, defaults to `TCP`. + enum: + - TCP + - UDP + type: string + required: + - targetHost + - targetPort + type: object + kafka: + description: Kafka configuration, such as the address and topic, + to send enriched flows to. + properties: + address: + default: "" + description: Address of the Kafka server + type: string + sasl: + description: SASL authentication configuration. [Unsupported + (*)]. + properties: + clientIDReference: + description: Reference to the secret or config map containing + the client ID + properties: + file: + description: File name within the config map or + secret + type: string + name: + description: Name of the config map or secret containing + the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing the file. If omitted, the default is + to use the same namespace as where NetObserv is + deployed. If the namespace is different, the config + map or the secret is copied so that it can be + mounted as required. + type: string + type: + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + clientSecretReference: + description: Reference to the secret or config map containing + the client secret + properties: + file: + description: File name within the config map or + secret + type: string + name: + description: Name of the config map or secret containing + the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing the file. If omitted, the default is + to use the same namespace as where NetObserv is + deployed. If the namespace is different, the config + map or the secret is copied so that it can be + mounted as required. + type: string + type: + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: + default: DISABLED + description: Type of SASL authentication to use, or + `DISABLED` if SASL is not used + enum: + - DISABLED + - PLAIN + - SCRAM-SHA512 + type: string + type: object + tls: + description: TLS client configuration. When using TLS, verify + that the address matches the Kafka port used for TLS, + generally 9093. + properties: + caCert: + description: '`caCert` defines the reference of the + certificate for the Certificate Authority' + properties: + certFile: + description: '`certFile` defines the path to the + certificate file name within the config map or + secret' + type: string + certKey: + description: '`certKey` defines the path to the + certificate private key file name within the config + map or secret. 
Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it + can be mounted as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: Enable TLS + type: boolean + insecureSkipVerify: + default: false + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to + true, the `caCert` field is ignored.' + type: boolean + userCert: + description: '`userCert` defines the user certificate + reference and is used for mTLS (you can ignore it + when using one-way TLS)' + properties: + certFile: + description: '`certFile` defines the path to the + certificate file name within the config map or + secret' + type: string + certKey: + description: '`certKey` defines the path to the + certificate private key file name within the config + map or secret. Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it + can be mounted as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + type: object + topic: + default: "" + description: Kafka topic to use. It must exist, NetObserv + does not create it. + type: string + required: + - address + - topic + type: object + type: + description: '`type` selects the type of exporters. The available + options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported (*).' + enum: + - KAFKA + - IPFIX + type: string + required: + - type + type: object + type: array + kafka: + description: Kafka configuration, allowing to use Kafka as a broker + as part of the flow collection pipeline. Available when the `spec.deploymentModel` + is `KAFKA`. + properties: + address: + default: "" + description: Address of the Kafka server + type: string + sasl: + description: SASL authentication configuration. [Unsupported (*)]. + properties: + clientIDReference: + description: Reference to the secret or config map containing + the client ID + properties: + file: + description: File name within the config map or secret + type: string + name: + description: Name of the config map or secret containing + the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + the file. If omitted, the default is to use the same + namespace as where NetObserv is deployed. If the namespace + is different, the config map or the secret is copied + so that it can be mounted as required. 
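+                      # A sketch of SASL settings as they could appear here or under an
+                      # exporter's `kafka.sasl` (the secret name `kafka-sasl` and file
+                      # names are hypothetical):
+                      #
+                      #   sasl:
+                      #     type: PLAIN
+                      #     clientIDReference:
+                      #       type: secret
+                      #       name: kafka-sasl
+                      #       file: client-id
+                      #     clientSecretReference:
+                      #       type: secret
+                      #       name: kafka-sasl
+                      #       file: client-secret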
+ type: string + type: + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + clientSecretReference: + description: Reference to the secret or config map containing + the client secret + properties: + file: + description: File name within the config map or secret + type: string + name: + description: Name of the config map or secret containing + the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + the file. If omitted, the default is to use the same + namespace as where NetObserv is deployed. If the namespace + is different, the config map or the secret is copied + so that it can be mounted as required. + type: string + type: + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: + default: DISABLED + description: Type of SASL authentication to use, or `DISABLED` + if SASL is not used + enum: + - DISABLED + - PLAIN + - SCRAM-SHA512 + type: string + type: object + tls: + description: TLS client configuration. When using TLS, verify + that the address matches the Kafka port used for TLS, generally + 9093. + properties: + caCert: + description: '`caCert` defines the reference of the certificate + for the Certificate Authority' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + certificates. If omitted, the default is to use the + same namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + is copied so that it can be mounted as required. + type: string + type: + description: 'Type for the certificate reference: `configmap` + or `secret`' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: Enable TLS + type: boolean + insecureSkipVerify: + default: false + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to true, + the `caCert` field is ignored.' + type: boolean + userCert: + description: '`userCert` defines the user certificate reference + and is used for mTLS (you can ignore it when using one-way + TLS)' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret containing + certificates. If omitted, the default is to use the + same namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + is copied so that it can be mounted as required. 
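+                      # A sketch of the `KAFKA` deployment model with mTLS (address and
+                      # secret names are placeholders; note port 9093 for TLS):
+                      #
+                      #   deploymentModel: KAFKA
+                      #   kafka:
+                      #     address: kafka-bootstrap.netobserv:9093
+                      #     topic: network-flows
+                      #     tls:
+                      #       enable: true
+                      #       caCert:
+                      #         type: secret
+                      #         name: kafka-ca
+                      #         certFile: ca.crt
+                      #       userCert:
+                      #         type: secret
+                      #         name: kafka-user
+                      #         certFile: user.crt
+                      #         certKey: user.key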
+                      type: string
+                      type:
+                        description: 'Type for the certificate reference: `configmap`
+                          or `secret`'
+                        enum:
+                        - configmap
+                        - secret
+                        type: string
+                    type: object
+                type: object
+              topic:
+                default: ""
+                description: Kafka topic to use. It must exist, NetObserv does
+                  not create it.
+                type: string
+            required:
+            - address
+            - topic
+            type: object
+          loki:
+            description: loki, the flow store, client settings.
+            properties:
+              batchSize:
+                default: 102400
+                description: '`batchSize` is the maximum batch size (in bytes)
+                  of logs to accumulate before sending.'
+                format: int64
+                minimum: 1
+                type: integer
+              batchWait:
+                default: 1s
+                description: '`batchWait` is the maximum time to wait before sending
+                  a batch.'
+                type: string
+              enable:
+                default: true
+                description: enable storing flows to Loki. It is required for
+                  the OpenShift Console plugin installation.
+                type: boolean
+              lokiStack:
+                description: Loki configuration for LOKISTACK mode. This is useful
+                  for an easy loki-operator configuration. It will be ignored
+                  for other modes
+                properties:
+                  name:
+                    default: loki
+                    type: string
+                  namespace:
+                    default: netobserv
+                    type: string
+                type: object
+              manual:
+                description: Loki configuration for MANUAL mode. This is the more
+                  flexible configuration. It will be ignored for other modes
+                properties:
+                  authToken:
+                    default: DISABLED
+                    description: '`authToken` describes the way to get a token
+                      to authenticate to Loki.
- `DISABLED` does not send + any token with the request.
- `FORWARD` forwards the + user token for authorization.
- `HOST` [deprecated (*)] + - uses the local pod service account to authenticate to + Loki.
When using the Loki Operator, this must be set + to `FORWARD`.' + enum: + - DISABLED + - HOST + - FORWARD + type: string + ingesterUrl: + default: http://loki:3100/ + description: '`ingesterUrl` is the address of an existing + Loki service to push the flows to. When using the Loki Operator, + set it to the Loki gateway service with the `network` tenant + set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.' + type: string + querierUrl: + description: '`querierURL` specifies the address of the Loki + querier service, in case it is different from the Loki ingester + URL. If empty, the URL value is used (assuming that the + Loki ingester and querier are in the same server). When + using the Loki Operator, do not set it, since ingestion + and queries use the Loki gateway.' + type: string + statusTls: + description: TLS client configuration for Loki status URL. + properties: + caCert: + description: '`caCert` defines the reference of the certificate + for the Certificate Authority' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it can + be mounted as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: Enable TLS + type: boolean + insecureSkipVerify: + default: false + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to true, + the `caCert` field is ignored.' + type: boolean + userCert: + description: '`userCert` defines the user certificate + reference and is used for mTLS (you can ignore it when + using one-way TLS)' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it can + be mounted as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + type: object + statusUrl: + description: '`statusURL` specifies the address of the Loki + `/ready`, `/metrics` and `/config` endpoints, in case it + is different from the Loki querier URL. If empty, the `querierURL` + value is used. 
This is useful to show error messages and + some context in the frontend. When using the Loki Operator, + set it to the Loki HTTP query frontend service, for example + https://loki-query-frontend-http.netobserv.svc:3100/. `statusTLS` + configuration is used when `statusUrl` is set.' + type: string + tenantID: + default: netobserv + description: '`tenantID` is the Loki `X-Scope-OrgID` that + identifies the tenant for each request. When using the Loki + Operator, set it to `network`, which corresponds to a special + tenant mode.' + type: string + tls: + description: TLS client configuration for Loki URL. + properties: + caCert: + description: '`caCert` defines the reference of the certificate + for the Certificate Authority' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it can + be mounted as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: Enable TLS + type: boolean + insecureSkipVerify: + default: false + description: '`insecureSkipVerify` allows skipping client-side + verification of the server certificate. If set to true, + the `caCert` field is ignored.' + type: boolean + userCert: + description: '`userCert` defines the user certificate + reference and is used for mTLS (you can ignore it when + using one-way TLS)' + properties: + certFile: + description: '`certFile` defines the path to the certificate + file name within the config map or secret' + type: string + certKey: + description: '`certKey` defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary.' + type: string + name: + description: Name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, the + config map or the secret is copied so that it can + be mounted as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + type: object + type: object + maxBackoff: + default: 5s + description: '`maxBackoff` is the maximum backoff time for client + connection between retries.' + type: string + maxRetries: + default: 2 + description: '`maxRetries` is the maximum number of retries for + client connections.' + format: int32 + minimum: 0 + type: integer + minBackoff: + default: 1s + description: '`minBackoff` is the initial backoff time for client + connection between retries.' 
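+                # A sketch of the two client configuration modes introduced with
+                # these fields. With `LOKISTACK`, only the LokiStack name and
+                # namespace are needed:
+                #
+                #   loki:
+                #     mode: LOKISTACK
+                #     lokiStack:
+                #       name: loki
+                #       namespace: netobserv
+                #
+                # With `MANUAL`, the endpoints are set explicitly:
+                #
+                #   loki:
+                #     mode: MANUAL
+                #     manual:
+                #       ingesterUrl: http://loki:3100/
+                #       tenantID: netobserv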
+ type: string + mode: + default: MANUAL + enum: + - MANUAL + - LOKISTACK + type: string + staticLabels: + additionalProperties: + type: string + default: + app: netobserv-flowcollector + description: '`staticLabels` is a map of common labels to set + on each flow.' + type: object + timeout: + default: 10s + description: '`timeout` is the maximum time connection / request + limit. A timeout of zero means no timeout.' + type: string + type: object + namespace: + default: netobserv + description: Namespace where NetObserv pods are deployed. + type: string + processor: + description: '`processor` defines the settings of the component that + receives the flows from the agent, enriches them, generates metrics, + and forwards them to the Loki persistence layer and/or any available + exporter.' + properties: + clusterName: + default: "" + description: '`clusterName` is the name of the cluster to appear + in the flows data. This is useful in a multi-cluster context. + When using OpenShift, leave empty to make it automatically determined.' + type: string + conversationEndTimeout: + default: 10s + description: '`conversationEndTimeout` is the time to wait after + a network flow is received, to consider the conversation ended. + This delay is ignored when a FIN packet is collected for TCP + flows (see `conversationTerminatingTimeout` instead).' + type: string + conversationHeartbeatInterval: + default: 30s + description: '`conversationHeartbeatInterval` is the time to wait + between "tick" events of a conversation' + type: string + conversationTerminatingTimeout: + default: 5s + description: '`conversationTerminatingTimeout` is the time to + wait from detected FIN flag to end a conversation. Only relevant + for TCP flows.' + type: string + debug: + description: '`debug` allows setting some aspects of the internal + configuration of the flow processor. This section is aimed exclusively + for debugging and fine-grained performance optimizations, such + as GOGC and GOMAXPROCS env vars. Users setting its values do + it at their own risk.' + properties: + env: + additionalProperties: + type: string + description: '`env` allows passing custom environment variables + to underlying components. Useful for passing some very concrete + performance-tuning options, such as GOGC and GOMAXPROCS, + that should not be publicly exposed as part of the FlowCollector + descriptor, as they are only useful in edge debug or support + scenarios.' + type: object + type: object + dropUnusedFields: + default: true + description: '`dropUnusedFields` allows, when set to true, to + drop fields that are known to be unused by OVS, to save storage + space.' + type: boolean + enableKubeProbes: + default: true + description: '`enableKubeProbes` is a flag to enable or disable + Kubernetes liveness and readiness probes' + type: boolean + healthPort: + default: 8080 + description: '`healthPort` is a collector HTTP port in the Pod + that exposes the health check API' + format: int32 + maximum: 65535 + minimum: 1 + type: integer + imagePullPolicy: + default: IfNotPresent + description: '`imagePullPolicy` is the Kubernetes pull policy + for the image defined above' + enum: + - IfNotPresent + - Always + - Never + type: string + kafkaConsumerAutoscaler: + description: '`kafkaConsumerAutoscaler` is the spec of a horizontal + pod autoscaler to set up for `flowlogs-pipeline-transformer`, + which consumes Kafka messages. This setting is ignored when + Kafka is disabled.' 
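+            # A sketch of the conversation tracking timers under `spec.processor`
+            # (values shown are this schema's defaults):
+            #
+            #   processor:
+            #     conversationHeartbeatInterval: 30s
+            #     conversationEndTimeout: 10s
+            #     conversationTerminatingTimeout: 5s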
+ properties: + maxReplicas: + default: 3 + description: '`maxReplicas` is the upper limit for the number + of pods that can be set by the autoscaler; cannot be smaller + than MinReplicas.' + format: int32 + type: integer + metrics: + description: Metrics used by the pod autoscaler + items: + description: MetricSpec specifies how to scale based on + a single metric (only `type` and one other matching field + should be set at once). + properties: + containerResource: + description: containerResource refers to a resource + metric (such as those specified in requests and limits) + known to Kubernetes describing a single container + in each pod of the current scale target (e.g. CPU + or memory). Such metrics are built in to Kubernetes, + and have special scaling options on top of those available + to normal per-pod metrics using the "pods" source. + This is an alpha feature and can be enabled by the + HPAContainerMetrics feature flag. + properties: + container: + description: container is the name of the container + in the pods of the scaling target + type: string + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - container + - name + - target + type: object + external: + description: external refers to a global metric that + is not associated with any Kubernetes object. It allows + autoscaling based on information coming from components + running outside of cluster (for example length of + queue in cloud messaging service, or QPS from loadbalancer + running outside of cluster). + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. 
+ properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + object: + description: object refers to a metric describing a + single kubernetes object (for example, hits-per-second + on an Ingress object). + properties: + describedObject: + description: describedObject specifies the descriptions + of a object,such as kind,name apiVersion + properties: + apiVersion: + description: apiVersion is the API version of + the referent + type: string + kind: + description: 'kind is the kind of the referent; + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'name is the name of the referent; + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - kind + - name + type: object + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - describedObject + - metric + - target + type: object + pods: + description: pods refers to a metric describing each + pod in the current scale target (for example, transactions-processed-per-second). The + values will be averaged together before being compared + to the target value. + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. 
+ items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + resource: + description: resource refers to a resource metric (such + as those specified in requests and limits) known to + Kubernetes describing each pod in the current scale + target (e.g. CPU or memory). Such metrics are built + in to Kubernetes, and have special scaling options + on top of those available to normal per-pod metrics + using the "pods" source. + properties: + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. 
Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - name + - target + type: object + type: + description: 'type is the type of metric source. It + should be one of "ContainerResource", "External", + "Object", "Pods" or "Resource", each mapping to a + matching field in the object. Note: "ContainerResource" + type is available on when the feature-gate HPAContainerMetrics + is enabled' + type: string + required: + - type + type: object + type: array + minReplicas: + description: '`minReplicas` is the lower limit for the number + of replicas to which the autoscaler can scale down. It defaults + to 1 pod. minReplicas is allowed to be 0 if the alpha feature + gate HPAScaleToZero is enabled and at least one Object or + External metric is configured. Scaling is active as long + as at least one metric value is available.' + format: int32 + type: integer + status: + default: DISABLED + description: '`status` describes the desired status regarding + deploying an horizontal pod autoscaler.
- `DISABLED`
+                        does not deploy an horizontal pod autoscaler.<br>
- `ENABLED`
+                        deploys an horizontal pod autoscaler.<br>
' + enum: + - DISABLED + - ENABLED + type: string + type: object + kafkaConsumerBatchSize: + default: 10485760 + description: '`kafkaConsumerBatchSize` indicates to the broker + the maximum batch size, in bytes, that the consumer accepts. + Ignored when not using Kafka. Default: 10MB.' + type: integer + kafkaConsumerQueueCapacity: + default: 1000 + description: '`kafkaConsumerQueueCapacity` defines the capacity + of the internal message queue used in the Kafka consumer client. + Ignored when not using Kafka.' + type: integer + kafkaConsumerReplicas: + default: 3 + description: '`kafkaConsumerReplicas` defines the number of replicas + (pods) to start for `flowlogs-pipeline-transformer`, which consumes + Kafka messages. This setting is ignored when Kafka is disabled.' + format: int32 + minimum: 0 + type: integer + logLevel: + default: info + description: '`logLevel` of the processor runtime' + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + logTypes: + default: FLOWS + description: '`logTypes` defines the desired record types to generate. + Possible values are:
- `FLOWS` (default) to export regular
+              network flows<br>
- `CONVERSATIONS` to generate events for started
+              conversations, ended conversations as well as periodic "tick"
+              updates<br>
- `ENDED_CONVERSATIONS` to generate only ended conversations
+              events<br>
- `ALL` to generate both network flows and all conversations
+              events<br>
' + enum: + - FLOWS + - CONVERSATIONS + - ENDED_CONVERSATIONS + - ALL + type: string + metrics: + description: '`Metrics` define the processor configuration regarding + metrics' + properties: + disableAlerts: + description: '`disableAlerts` is a list of alerts that should + be disabled. Possible values are:
`NetObservNoFlows`,
+                which is triggered when no flows are being observed for
+                a certain period.<br>
`NetObservLokiError`, which is triggered
+                when flows are being dropped due to Loki errors.<br>
'
+              items:
+                description: Name of a processor alert. Possible values
+                  are:<br>
- `NetObservNoFlows`, which is triggered when
+                  no flows are being observed for a certain period.<br>
+                  - `NetObservLokiError`, which is triggered when flows
+                  are being dropped due to Loki errors.<br>
+ enum: + - NetObservNoFlows + - NetObservLokiError + type: string + type: array + ignoreTags: + default: + - egress + - packets + - nodes-flows + - namespaces-flows + - workloads-flows + - namespaces + description: '`ignoreTags` is a list of tags to specify which + metrics to ignore. Each metric is associated with a list + of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions + . Available tags are: `egress`, `ingress`, `flows`, `bytes`, + `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, + `namespaces-flows`, `workloads-flows`. Namespace-based metrics + are covered by both `workloads` and `namespaces` tags, hence + it is recommended to always ignore one of them (`workloads` + offering a finer granularity).' + items: + type: string + type: array + server: + description: Metrics server endpoint configuration for Prometheus + scraper + properties: + port: + default: 9102 + description: The prometheus HTTP port + format: int32 + maximum: 65535 + minimum: 1 + type: integer + tls: + description: TLS configuration. + properties: + insecureSkipVerify: + default: false + description: insecureSkipVerify allows skipping client-side + verification of the provided certificate. If set + to true, the `providedCaFile` field is ignored. + type: boolean + provided: + description: TLS configuration when `type` is set + to `PROVIDED`. + properties: + certFile: + description: '`certFile` defines the path to the + certificate file name within the config map + or secret' + type: string + certKey: + description: '`certKey` defines the path to the + certificate private key file name within the + config map or secret. Omit when the key is not + necessary.' + type: string + name: + description: Name of the config map or secret + containing certificates + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing certificates. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, + the config map or the secret is copied so that + it can be mounted as required. + type: string + type: + description: 'Type for the certificate reference: + `configmap` or `secret`' + enum: + - configmap + - secret + type: string + type: object + providedCaFile: + description: Reference to the CA file when `type` + is set to `PROVIDED`. + properties: + file: + description: File name within the config map or + secret + type: string + name: + description: Name of the config map or secret + containing the file + type: string + namespace: + default: "" + description: Namespace of the config map or secret + containing the file. If omitted, the default + is to use the same namespace as where NetObserv + is deployed. If the namespace is different, + the config map or the secret is copied so that + it can be mounted as required. + type: string + type: + description: 'Type for the file reference: "configmap" + or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: + default: DISABLED + description: Select the type of TLS configuration:
+ - `DISABLED` (default) to not configure TLS for + the endpoint. - `PROVIDED` to manually provide cert + file and a key file. - `AUTO` to use OpenShift auto + generated certificate using annotations. + enum: + - DISABLED + - PROVIDED + - AUTO + type: string + type: object + type: object + type: object + port: + default: 2055 + description: Port of the flow collector (host port). By convention, + some values are forbidden. It must be greater than 1024 and + different from 4500, 4789 and 6081. + format: int32 + maximum: 65535 + minimum: 1025 + type: integer + profilePort: + description: '`profilePort` allows setting up a Go pprof profiler + listening to this port' + format: int32 + maximum: 65535 + minimum: 0 + type: integer + resources: + default: + limits: + memory: 800Mi + requests: + cpu: 100m + memory: 100Mi + description: '`resources` are the compute resources required by + this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + type: object + status: + description: '`FlowCollectorStatus` defines the observed state of FlowCollector' + properties: + conditions: + description: '`conditions` represent the latest available observations + of an object''s state' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. 
\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: Namespace where console plugin and flowlogs-pipeline + have been deployed. 
+ type: string + required: + - conditions + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/controllers/flowcollector_controller_test.go b/controllers/flowcollector_controller_test.go index 3a6dd3945..6b5024f24 100644 --- a/controllers/flowcollector_controller_test.go +++ b/controllers/flowcollector_controller_test.go @@ -641,6 +641,7 @@ func flowCollectorControllerSpecs() { Name: "loki-ca", Namespace: operatorNamespace, }, + Data: map[string]string{"ca.crt": "certificate data"}, })).Should(Succeed()) UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { fc.Spec.Loki.Manual.TLS = flowslatest.ClientTLS{ @@ -701,6 +702,7 @@ func flowCollectorControllerSpecs() { } }) }) + It("Should have certificate mounted", func() { By("Expecting certificate mounted") Eventually(func() interface{} { diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 02b222028..1aa4d4d29 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -42,7 +42,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" - flowsv1alpha1 "github.com/netobserv/network-observability-operator/api/v1alpha1" flowsv1beta1 "github.com/netobserv/network-observability-operator/api/v1beta1" flowsv1beta2 "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/operator" @@ -88,13 +87,20 @@ var _ = BeforeSuite(func() { By("bootstrapping test environment") testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{ - filepath.Join("..", "config", "crd", "bases"), - // We need to install the ConsolePlugin CRD to test setup of our Network Console Plugin - filepath.Join("..", "vendor", "github.com", "openshift", "api", "console", "v1alpha1"), - filepath.Join("..", "vendor", "github.com", "openshift", "api", "config", "v1"), - filepath.Join("..", "vendor", "github.com", "openshift", "api", "operator", "v1"), - filepath.Join("..", "test-assets"), + Scheme: scheme.Scheme, + CRDInstallOptions: envtest.CRDInstallOptions{ + Paths: []string{ + // FIXME: till v1beta2 becomes the new storage version we will point to hack folder + // where v1beta2 is marked as the storage version + // filepath.Join("..", "config", "crd", "bases"), + filepath.Join("..", "config", "crd", "hack"), + // We need to install the ConsolePlugin CRD to test setup of our Network Console Plugin + filepath.Join("..", "vendor", "github.com", "openshift", "api", "console", "v1alpha1"), + filepath.Join("..", "vendor", "github.com", "openshift", "api", "config", "v1"), + filepath.Join("..", "vendor", "github.com", "openshift", "api", "operator", "v1"), + filepath.Join("..", "test-assets"), + }, + CleanUpAfterUse: true, }, ErrorIfCRDPathMissing: true, } @@ -103,9 +109,6 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) Expect(cfg).NotTo(BeNil()) - err = flowsv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - err = flowsv1beta1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index 2ff0298fc..e08a24c4a 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -23,7 +23,8 @@ Resource Types: -FlowCollector is the Schema for the flowcollectors API, which pilots and configures netflow collection. 
+FlowCollector is the Schema for the flowcollectors API, which pilots and configures netflow collection.
+ Deprecated: This package will be removed in one of the next releases.<br/>
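
The hunk above marks the older API docs as deprecated; v1beta2 is now the conversion hub. For orientation, here is a minimal sketch of a v1beta2 manifest exercising the processor autoscaler block from the CRD schema earlier in this patch. The values and the KAFKA deployment model are illustrative, not taken from the shipped samples:

    apiVersion: flows.netobserv.io/v1beta2
    kind: FlowCollector
    metadata:
      name: cluster   # the operator reconciles a single resource, conventionally named "cluster"
    spec:
      deploymentModel: KAFKA        # assumed enum casing; check the generated CRD
      processor:
        logLevel: info
        logTypes: FLOWS
        kafkaConsumerReplicas: 3
        kafkaConsumerAutoscaler:
          status: ENABLED
          minReplicas: 1
          maxReplicas: 3
          metrics:                  # autoscaling/v2 MetricSpec, as embedded in the schema above
          - type: Resource
            resource:
              name: cpu
              target:
                type: Utilization
                averageUtilization: 50
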
namespace string - Namespace of the config map or secret containing the file. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.<br/>
+ Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.<br/>
<br/>
Default:<br/>
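
The doc change above clarifies the copy-on-different-namespace behavior for certificate references. A sketch of how such a reference looks in practice, using the fields from the CRD shown earlier in this patch; the secret name and namespace are hypothetical:

    spec:
      processor:
        metrics:
          server:
            tls:
              type: PROVIDED
              provided:
                type: secret
                name: flp-metrics-cert    # hypothetical secret holding the certificate
                namespace: certs          # differs from the NetObserv namespace, so it is copied over
                certFile: tls.crt
                certKey: tls.key
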
diff --git a/main.go b/main.go index b33ee6052..c4f5fb0ed 100644 --- a/main.go +++ b/main.go @@ -46,7 +46,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" - flowsv1alpha1 "github.com/netobserv/network-observability-operator/api/v1alpha1" flowsv1beta1 "github.com/netobserv/network-observability-operator/api/v1beta1" flowsv1beta2 "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers" @@ -65,7 +64,6 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(flowsv1alpha1.AddToScheme(scheme)) utilruntime.Must(flowsv1beta1.AddToScheme(scheme)) utilruntime.Must(flowsv1beta2.AddToScheme(scheme)) utilruntime.Must(corev1.AddToScheme(scheme)) @@ -143,7 +141,7 @@ func main() { os.Exit(1) } if err = (&flowsv1beta2.FlowCollector{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create v1beta1 webhook", "webhook", "FlowCollector") + setupLog.Error(err, "unable to create v1beta2 webhook", "webhook", "FlowCollector") os.Exit(1) } //+kubebuilder:scaffold:builder diff --git a/pkg/cleanup/cleanup_test.go b/pkg/cleanup/cleanup_test.go index bcccdd71b..e9a67e477 100644 --- a/pkg/cleanup/cleanup_test.go +++ b/pkg/cleanup/cleanup_test.go @@ -17,7 +17,7 @@ var oldDashboard = corev1.ConfigMap{ Name: "grafana-dashboard-netobserv", Namespace: "openshift-config-managed", OwnerReferences: []v1.OwnerReference{{ - APIVersion: "flows.netobserv.io/v1beta1", + APIVersion: "flows.netobserv.io/v1beta2", Kind: "FlowCollector", Name: "cluster", Controller: pointer.Bool(true), @@ -74,7 +74,7 @@ func TestCleanPastReferences_DifferentOwner(t *testing.T) { clientMock := test.ClientMock{} unmanaged := oldDashboard unmanaged.OwnerReferences = []v1.OwnerReference{{ - APIVersion: "something/v1beta1", + APIVersion: "something/v1beta2", Kind: "SomethingElse", Name: "SomethingElse", }} From 0397cf9d75af872131076b0fdedf54849d6b4b75 Mon Sep 17 00:00:00 2001 From: Julien Pinsonneau Date: Fri, 6 Oct 2023 11:33:32 +0200 Subject: [PATCH 15/17] moved hack crd & set webhook path for tests --- controllers/suite_test.go | 7 ++++++- .../cloned.flows.netobserv.io_flowcollectors.yaml | 0 2 files changed, 6 insertions(+), 1 deletion(-) rename config/crd/hack/flows.netobserv.io_flowcollectors.yaml => hack/cloned.flows.netobserv.io_flowcollectors.yaml (100%) diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 1aa4d4d29..cd4be20b3 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -93,7 +93,7 @@ var _ = BeforeSuite(func() { // FIXME: till v1beta2 becomes the new storage version we will point to hack folder // where v1beta2 is marked as the storage version // filepath.Join("..", "config", "crd", "bases"), - filepath.Join("..", "config", "crd", "hack"), + filepath.Join("..", "hack"), // We need to install the ConsolePlugin CRD to test setup of our Network Console Plugin filepath.Join("..", "vendor", "github.com", "openshift", "api", "console", "v1alpha1"), filepath.Join("..", "vendor", "github.com", "openshift", "api", "config", "v1"), @@ -101,6 +101,11 @@ var _ = BeforeSuite(func() { filepath.Join("..", "test-assets"), }, CleanUpAfterUse: true, + WebhookOptions: envtest.WebhookInstallOptions{ + Paths: []string{ + filepath.Join("..", "config", "webhook"), + }, + }, }, ErrorIfCRDPathMissing: true, } diff --git a/config/crd/hack/flows.netobserv.io_flowcollectors.yaml 
b/hack/cloned.flows.netobserv.io_flowcollectors.yaml similarity index 100% rename from config/crd/hack/flows.netobserv.io_flowcollectors.yaml rename to hack/cloned.flows.netobserv.io_flowcollectors.yaml From aeb6034cc0eba6decfc4aaaa0ecbdc1b2cef1926 Mon Sep 17 00:00:00 2001 From: Julien Pinsonneau <91894519+jpinsonneau@users.noreply.github.com> Date: Tue, 10 Oct 2023 11:23:39 +0200 Subject: [PATCH 16/17] Update api/v1beta2/flowcollector_types.go Co-authored-by: Joel Takvorian --- api/v1beta2/flowcollector_types.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go index 3a7e019d8..9644f8b24 100644 --- a/api/v1beta2/flowcollector_types.go +++ b/api/v1beta2/flowcollector_types.go @@ -845,7 +845,7 @@ const ( // `FlowCollectorExporter` defines an additional exporter to send enriched flows to. type FlowCollectorExporter struct { - // `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported (*). + // `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`. // +unionDiscriminator // +kubebuilder:validation:Enum:="KAFKA";"IPFIX" // +kubebuilder:validation:Required @@ -855,7 +855,7 @@ type FlowCollectorExporter struct { // +optional Kafka FlowCollectorKafka `json:"kafka,omitempty"` - // IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. [Unsupported (*)]. + // IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. // +optional IPFIX FlowCollectorIPFIXReceiver `json:"ipfix,omitempty"` } From 3de9f1ac1469f9d176509935d09f58c002c20f54 Mon Sep 17 00:00:00 2001 From: Julien Pinsonneau Date: Tue, 10 Oct 2023 11:30:03 +0200 Subject: [PATCH 17/17] addressed feedback + update bundle --- api/v1beta1/flowcollector_types.go | 2 +- api/v1beta1/flowcollector_webhook.go | 12 ------------ api/v1beta2/flowcollector_types.go | 2 +- .../flows.netobserv.io_flowcollectors.yaml | 8 ++++---- .../bases/flows.netobserv.io_flowcollectors.yaml | 8 ++++---- docs/FlowCollector.md | 14 +++++++------- hack/cloned.flows.netobserv.io_flowcollectors.yaml | 10 +++++----- 7 files changed, 22 insertions(+), 34 deletions(-) diff --git a/api/v1beta1/flowcollector_types.go b/api/v1beta1/flowcollector_types.go index f03dc0765..4047aed38 100644 --- a/api/v1beta1/flowcollector_types.go +++ b/api/v1beta1/flowcollector_types.go @@ -57,7 +57,7 @@ type FlowCollectorSpec struct { // enriches them, generates metrics, and forwards them to the Loki persistence layer and/or any available exporter. Processor FlowCollectorFLP `json:"processor,omitempty"` - // loki, the flow store, client settings. + // `loki`, the flow store, client settings. Loki FlowCollectorLoki `json:"loki,omitempty"` // `consolePlugin` defines the settings related to the OpenShift Console plugin, when available. 
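
With the `[Unsupported (*)]` qualifier dropped from the exporter fields in the hunk above, IPFIX export becomes a first-class option next to Kafka. A hedged sketch of what `spec.exporters` could look like; the addresses are placeholders, and the `transport` field and its casing are assumed from `FlowCollectorIPFIXReceiver`:

    spec:
      exporters:
      - type: KAFKA
        kafka:
          address: kafka-cluster-kafka-bootstrap.netobserv:9092   # placeholder broker
          topic: network-flows-export                             # placeholder topic
      - type: IPFIX
        ipfix:
          targetHost: ipfix-collector.example.com   # placeholder collector
          targetPort: 4739
          transport: TCP                            # assumed field, per FlowCollectorIPFIXReceiver
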
diff --git a/api/v1beta1/flowcollector_webhook.go b/api/v1beta1/flowcollector_webhook.go index 8679568d8..4ce18ca3b 100644 --- a/api/v1beta1/flowcollector_webhook.go +++ b/api/v1beta1/flowcollector_webhook.go @@ -57,18 +57,6 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { return err } - // Processor - dst.Spec.Processor.LogTypes = restored.Spec.Processor.LogTypes - if restored.Spec.Processor.ConversationHeartbeatInterval != nil { - dst.Spec.Processor.ConversationHeartbeatInterval = restored.Spec.Processor.ConversationHeartbeatInterval - } - if restored.Spec.Processor.ConversationEndTimeout != nil { - dst.Spec.Processor.ConversationEndTimeout = restored.Spec.Processor.ConversationEndTimeout - } - if restored.Spec.Processor.Metrics.DisableAlerts != nil { - dst.Spec.Processor.Metrics.DisableAlerts = restored.Spec.Processor.Metrics.DisableAlerts - } - // Loki dst.Spec.Loki.Mode = restored.Spec.Loki.Mode dst.Spec.Loki.Manual = restored.Spec.Loki.Manual diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go index 9644f8b24..07bfa0f0f 100644 --- a/api/v1beta2/flowcollector_types.go +++ b/api/v1beta2/flowcollector_types.go @@ -57,7 +57,7 @@ type FlowCollectorSpec struct { // enriches them, generates metrics, and forwards them to the Loki persistence layer and/or any available exporter. Processor FlowCollectorFLP `json:"processor,omitempty"` - // loki, the flow store, client settings. + // `loki`, the flow store, client settings. Loki FlowCollectorLoki `json:"loki,omitempty"` // `consolePlugin` defines the settings related to the OpenShift Console plugin, when available. diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index 1584f99ac..ffa7beced 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -3708,7 +3708,7 @@ spec: - topic type: object loki: - description: loki, the flow store, client settings. + description: '`loki`, the flow store, client settings.' properties: authToken: default: DISABLED @@ -5967,7 +5967,7 @@ spec: properties: ipfix: description: IPFIX configuration, such as the IP address and - port to send enriched IPFIX flows to. [Unsupported (*)]. + port to send enriched IPFIX flows to. properties: targetHost: default: "" @@ -6165,7 +6165,7 @@ spec: type: object type: description: '`type` selects the type of exporters. The available - options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported (*).' + options are `KAFKA` and `IPFIX`.' enum: - KAFKA - IPFIX @@ -6343,7 +6343,7 @@ spec: - topic type: object loki: - description: loki, the flow store, client settings. + description: '`loki`, the flow store, client settings.' properties: batchSize: default: 102400 diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index 4a8e7c073..670d8889e 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -3694,7 +3694,7 @@ spec: - topic type: object loki: - description: loki, the flow store, client settings. + description: '`loki`, the flow store, client settings.' properties: authToken: default: DISABLED @@ -5953,7 +5953,7 @@ spec: properties: ipfix: description: IPFIX configuration, such as the IP address and - port to send enriched IPFIX flows to. [Unsupported (*)]. + port to send enriched IPFIX flows to. 
properties: targetHost: default: "" @@ -6151,7 +6151,7 @@ spec: type: object type: description: '`type` selects the type of exporters. The available - options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported (*).' + options are `KAFKA` and `IPFIX`.' enum: - KAFKA - IPFIX @@ -6329,7 +6329,7 @@ spec: - topic type: object loki: - description: loki, the flow store, client settings. + description: '`loki`, the flow store, client settings.' properties: batchSize: default: 102400 diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index e08a24c4a..991a1ccc0 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -4071,7 +4071,7 @@ Defines the desired state of the FlowCollector resource.
<br/>
*: the mention
@@ -6526,7 +6526,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka
-loki, the flow store, client settings.
+`loki`, the flow store, client settings.<br/>
loki object - loki, the flow store, client settings.<br/>
+ `loki`, the flow store, client settings.<br/>
false
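
The `loki` description change above goes hand in hand with the new Loki integration fields this series introduces (see the `flows_v1beta2_flowcollector_lokistack.yaml` sample added earlier). A sketch, under the assumption that v1beta2 splits the old inline settings into a `mode` discriminator plus mode-specific blocks, as the webhook restore code and the `Loki.Manual.TLS` usage in the tests suggest; field names and enum casing are assumptions:

    spec:
      loki:
        mode: MANUAL          # assumed enum casing
        manual:
          ingesterUrl: http://loki.netobserv.svc:3100/   # assumed field name, carrying the old inline URL
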
@@ -8699,7 +8699,7 @@ Defines the desired state of the FlowCollector resource.
<br/>
*: the mention
@@ -10341,7 +10341,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -10350,7 +10350,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -10369,7 +10369,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims.
-IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. [Unsupported (*)].
+IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to.<br/>
loki object - loki, the flow store, client settings.<br/>
+ `loki`, the flow store, client settings.<br/>
false
type enum - `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported (*).<br/>
+ `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`.<br/>
<br/>
Enum: KAFKA, IPFIX<br/>
ipfix object - IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. [Unsupported (*)].<br/>
+ IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to.<br/>
false
@@ -11154,7 +11154,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka
-loki, the flow store, client settings.
+`loki`, the flow store, client settings.<br/>
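
For the client-side settings referenced in the rows above, a sketch of the Loki batching knobs as they appear in the schema. The 102400-byte default is visible in the CRD diff below; `batchWait` and `timeout` are assumed from the same struct:

    spec:
      loki:
        batchSize: 102400   # maximum batch size, in bytes, before flushing
        batchWait: 1s       # maximum wait before flushing a batch (assumed field)
        timeout: 10s        # client connection timeout (assumed field)
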
diff --git a/hack/cloned.flows.netobserv.io_flowcollectors.yaml b/hack/cloned.flows.netobserv.io_flowcollectors.yaml index 0edbd9e3a..014ae01bf 100644 --- a/hack/cloned.flows.netobserv.io_flowcollectors.yaml +++ b/hack/cloned.flows.netobserv.io_flowcollectors.yaml @@ -1244,7 +1244,7 @@ spec: - topic type: object loki: - description: loki, the flow store, client settings. + description: '`loki`, the flow store, client settings.' properties: authToken: default: DISABLED @@ -3694,7 +3694,7 @@ spec: - topic type: object loki: - description: loki, the flow store, client settings. + description: '`loki`, the flow store, client settings.' properties: authToken: default: DISABLED @@ -5953,7 +5953,7 @@ spec: properties: ipfix: description: IPFIX configuration, such as the IP address and - port to send enriched IPFIX flows to. [Unsupported (*)]. + port to send enriched IPFIX flows to. properties: targetHost: default: "" @@ -6151,7 +6151,7 @@ spec: type: object type: description: '`type` selects the type of exporters. The available - options are `KAFKA` and `IPFIX`. `IPFIX` is unsupported (*).' + options are `KAFKA` and `IPFIX`.' enum: - KAFKA - IPFIX @@ -6329,7 +6329,7 @@ spec: - topic type: object loki: - description: loki, the flow store, client settings. + description: '`loki`, the flow store, client settings.' properties: batchSize: default: 102400
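
The `hack/cloned.*` copy touched by the last hunks exists so that envtest can run with v1beta2 as the storage version before the real CRD flips over (see the FIXME in suite_test.go earlier in this series). In CRD terms, the difference between the two copies boils down to something like this sketch:

    apiVersion: apiextensions.k8s.io/v1
    kind: CustomResourceDefinition
    metadata:
      name: flowcollectors.flows.netobserv.io
    spec:
      group: flows.netobserv.io
      versions:
      - name: v1beta1
        served: true
        storage: false    # storage flag moved off v1beta1 in the cloned copy...
      - name: v1beta2
        served: true
        storage: true     # ...and onto v1beta2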